
Ceph on ZFS: single-node setup walkthrough

Date: 2017/3/3 13:52:27   Editor: Linux技術
[root@centos72 current]# lsb_release -a
LSB Version: :core-4.1-amd64:core-4.1-noarch
Distributor ID: CentOS
Description: CentOS Linux release 7.2.1511 (Core)
Release: 7.2.1511
Codename: Core
# yum localinstall --nogpgcheck -y http://archive.zfsonlinux.org/epel/zfs-release.el7.noarch.rpm
# yum install -y zfs
(1/9): libnvpair1-0.6.5.6-1.el7.centos.x86_64.rpm
(2/9): dkms-2.2.0.3-30.git.7c3e7c5.el7.noarch.rpm
(3/9): libuutil1-0.6.5.6-1.el7.centos.x86_64.rpm
(4/9): libzfs2-0.6.5.6-1.el7.centos.x86_64.rpm
(5/9): spl-0.6.5.6-1.el7.centos.x86_64.rpm
(6/9): libzpool2-0.6.5.6-1.el7.centos.x86_64.rpm
(7/9): zfs-0.6.5.6-1.el7.centos.x86_64.rpm
(8/9): spl-dkms-0.6.5.6-1.el7.centos.noarch.rpm
(9/9): zfs-dkms-0.6.5.6-1.el7.centos.noarch.rpm
# modprobe zfs
# dmesg | egrep "SPL|ZFS"
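If dmesg shows no SPL/ZFS messages, the loaded modules can also be checked directly; this is an optional sanity check, not part of the original walkthrough:
# lsmod | egrep "^spl|^zfs"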
Ceph version: ceph-10.2.0.tar.gz
Build Ceph against the ZFS libraries. The headers come from the libzfs2-devel package, whose contents can be listed with rpm:
# rpm -ql libzfs2-devel-0.6.5.6-1.el7.centos.x86_64
/usr/include/libzfs/libzfs.h
Export the following environment so that configure picks up the installed ZFS headers:
export LIBZFS_LIBS="/usr/include/libzfs/"
export LIBZFS_CFLAGS="-I/usr/include/libzfs -I/usr/include/libspl"
# ./configure --with-libzfs --with-libxfs --with-radosgw
# make -j 8 && make install
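After installation, a quick version check confirms the freshly built binaries are on the PATH (an optional step, not captured in the original run):
# ceph --version
# ceph-osd --version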
vim /etc/ceph/ceph.conf
[global]
max open files = 131072
log file = /var/log/ceph/ceph-$name.log
pid file = /var/run/ceph/$name.pid
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd pool default size = 1
[mon]
debug mon = 10
debug paxos = 0
mon data = /data/mon/mon.$id
[mon.0]
host = 127.0.0.1
mon addr = 127.0.0.1:6789
[osd]
osd data = /data/osd/osd.$id
osd journal = /data/osd/osd.$id/journal
osd journal size = 1000
debug osd = 10
journal_dio = false
journal_aio = false
filestore_zfs_snap = false
osd max object name len = 1024
filestore_max_xattr_value_size_other = 20480
[osd.0]
host = 127.0.0.1
[osd.1]
host = 127.0.0.1
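The configuration above writes logs to /var/log/ceph and pid files to /var/run/ceph, so make sure those directories exist before starting any daemon; a plain make install may not create them:
mkdir -p /var/log/ceph /var/run/ceph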
Steps to create the Ceph cluster:
mkdir -p /data/mon/mon.0
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *'
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
monmaptool --create --add 0 127.0.0.1 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap
ceph-mon -i 0 -c /etc/ceph/ceph.conf --mkfs --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring --mon-data /data/mon/mon.0 --debug_mon 10 --fsid=a7f64266-0894-4f1e-a635-d0aeaca0e993
ceph-mon -i 0
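With the monitor running, its state can be verified before adding OSDs (an optional check):
ceph mon stat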
ceph osd create
mkdir -p /data/osd/osd.0
zpool create disk0 /dev/vdb -f
zfs set mountpoint=/data/osd/osd.0 disk0
zfs set xattr=sa disk0
ceph-osd -i 0 --mkfs --osd-data=/data/osd/osd.0 -c /etc/ceph/ceph.conf --debug_osd 20 --mkkey
ceph auth add osd.0 osd 'allow *' mon 'allow rwx' -i /data/osd/osd.0/keyring
ceph-osd -i 0
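Once osd.0 is up, the ZFS dataset properties and the OSD map can be checked (optional):
zfs get mountpoint,xattr disk0
ceph osd stat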
ceph osd create
mkdir -p /data/osd/osd.1
zpool create disk1 /dev/vdc -f
zfs set mountpoint=/data/osd/osd.1 disk1
zfs set xattr=sa disk1
ceph-osd -i 1 --mkfs --osd-data=/data/osd/osd.1 -c /etc/ceph/ceph.conf --debug_osd 20 --mkkey
ceph auth add osd.1 osd 'allow *' mon 'allow rwx' -i /data/osd/osd.1/keyring
ceph-osd -i 1
ceph osd crush add-bucket unkownrack rack
ceph osd tree
ceph osd crush add-bucket host0 host
ceph osd crush add-bucket host1 host
ceph osd crush move host0 rack=unkownrack
ceph osd crush move host1 rack=unkownrack
ceph osd crush move unkownrack root=default
ceph osd crush create-or-move osd.0 1.0 host=host0 rack=unkownrack root=default
ceph osd crush create-or-move osd.1 1.0 host=host1 rack=unkownrack root=default
After startup, the status of the Ceph cluster is as follows:
[root@centos72 ceph]# ceph -s
cluster a7f64266-0894-4f1e-a635-d0aeaca0e993
health HEALTH_OK
monmap e1: 1 mons at {0=127.0.0.1:6789/0}
election epoch 3, quorum 0 0
osdmap e23: 2 osds: 2 up, 2 in
flags sortbitwise
pgmap v48: 64 pgs, 1 pools, 0 bytes data, 0 objects
2000 MB used, 37430 MB / 39430 MB avail
64 active+clean
[root@centos72 ceph]# zpool status -v
pool: disk0
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE CKSUM
disk0 ONLINE 0 0 0
vdb ONLINE 0 0 0
pool: disk1
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE CKSUM
disk1 ONLINE 0 0 0
vdc ONLINE 0 0 0
# df -h
Filesystem      Size  Used Avail Use% Mounted on
disk0 20G 1001M 19G 6% /data/osd/osd.0
disk0/current 19G 55M 19G 1% /data/osd/osd.0/current
disk1 20G 1001M 19G 6% /data/osd/osd.1
disk1/current 19G 37M 19G 1% /data/osd/osd.1/current
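As a final smoke test, not part of the original walkthrough, an object can be written and read back with rados; this assumes the single pool reported by ceph -s above is the default rbd pool that Jewel creates:
rados -p rbd put test-object /etc/hosts
rados -p rbd ls
rados -p rbd get test-object /tmp/test-object.out
rados -p rbd rm test-object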