Reference sites:

https://orebibou.com/2016/03/centos-7%E3%81%AB%E5%88%86%E6%95%A3%E3%82%AA%E3%83%96%E3%82%B8%E3%82%A7%E3%82%AF%E3%83%88%E3%82%B9%E3%83%88%E3%83%AC%E3%83%BC%E3%82%B8%E3%80%8Eceph%E3%80%8F%E3%82%92%E3%82%A4%E3%83%B3%E3%82%B9/

http://docs.ceph.com/docs/master/start/quick-ceph-deploy/

http://www.nminoru.jp/~nminoru/unix/ceph/how-to-use-rbd.html

https://www.redhat.com/archives/virt-tools-list/2016-January/msg00007.html


1. KVM virtual server setup

192.168.122.11/ceph-node-admin/CentOS Linux release 7.3.1611 (Core)

192.168.122.12/ceph-node-001/CentOS Linux release 7.3.1611 (Core) 

192.168.122.13/ceph-node-002/CentOS Linux release 7.3.1611 (Core)

192.168.122.14/ceph-node-003/CentOS Linux release 7.3.1611 (Core)
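
Each guest is a stock CentOS 7.3 minimal install. Note that the OSD nodes (ceph-node-001 ~ 003) also need a spare data disk (vdb), since the OSDs in section 3 are built on vdb. If the guests are created from the command line instead of virt-manager, a rough virt-install invocation might look like the following (the ISO path and sizes are placeholders, not necessarily what was used here):

]# virt-install --name ceph-node-001 --memory 2048 --vcpus 2 \
     --disk size=20 --disk size=15 \
     --cdrom /path/to/CentOS-7-x86_64-Minimal-1611.iso \
     --network network=default --os-variant centos7.0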


2. Common tasks on ceph-node-admin, ceph-node-001 ~ 003


]# yum update

]# yum install net-tools


""" 시간동기화툴 설치 및 시작 """

]# yum install chrony

]# systemctl enable chronyd

]# systemctl start chronyd


""" SELINUX 비활성화"""

]# vi /etc/selinux/config

- SELINUX=enforcing

+ SELINUX=disabled
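
Editing /etc/selinux/config only takes effect after the reboot at the end of this section; to switch SELinux to permissive in the running session as well, the following works:

]# setenforce 0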


""" firewalld 종료 """

]# systemctl stop firewalld

]# systemctl disable firewalld


""" hostname 변경""

ceph-node-admin

]# hostnamectl set-hostname ceph-node-admin


ceph-node-001

]# hostnamectl set-hostname ceph-node-001


ceph-node-002

]# hostnamectl set-hostname ceph-node-002


ceph-node-003

]# hostnamectl set-hostname ceph-node-003


""" NetworkManager 종료 """

]# systemctl stop NetworkManager

]# systemctl disable NetworkManager


""" /etc/hosts 수정 """

]# vi /etc/hosts

+ 192.168.122.11 ceph-node-admin

+ 192.168.122.12 ceph-node-001

+ 192.168.122.13 ceph-node-002

+ 192.168.122.14 ceph-node-003
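
A quick check that each node resolves the new entries (the answer should come straight from /etc/hosts):

]# getent hosts ceph-node-001
192.168.122.12  ceph-node-001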


""" ceph 계정생성 """

ceph-node-001~003

]# useradd -d /home/ceph -m ceph

]# passwd ceph

]# echo -e 'Defaults:ceph !requiretty\nceph ALL = (root) NOPASSWD:ALL' | tee /etc/sudoers.d/ceph

]# chmod 440 /etc/sudoers.d/ceph
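
Before ceph-deploy relies on it, it is worth confirming that passwordless sudo works for the new account; the following should print "root" without asking for a password:

]# su - ceph -c 'sudo whoami'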


ceph-node-admin

--------------------------------------------------------------------------------------------------------------

Is there really any need to create a separate ceph account on the admin node? Just use root.

]# useradd -d /home/ceph -m ceph

]# passwd ceph

]# echo -e 'Defaults:ceph !requiretty\nceph ALL = (root) NOPASSWD:ALL' | tee /etc/sudoers.d/ceph

]# chmod 440 /etc/sudoers.d/ceph

]# su - ceph

--------------------------------------------------------------------------------------------------------------


]# vi /root/.ssh/config

+ Host ceph-node-001

+    Hostname ceph-node-001

+    User ceph

+ Host ceph-node-002

+    Hostname ceph-node-002

+    User ceph

+ Host ceph-node-003

+    Hostname ceph-node-003

+    User ceph


]# ssh-keygen

]# ssh-copy-id ceph-node-001

]# ssh-copy-id ceph-node-002

]# ssh-copy-id ceph-node-003 
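
Because of the User entries in /root/.ssh/config above, these logins land on the ceph account of each node. A quick check that both the key-based login and passwordless sudo work non-interactively (this should print "root" with no prompts):

]# ssh ceph-node-001 'sudo whoami'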


""" ceph repo 등록 ""

]# yum install http://download.ceph.com/rpm-jewel/el7/noarch/ceph-release-1-1.el7.noarch.rpm


""" 재시작 """

]# reboot


3. Tasks on ceph-node-admin

--------------------------------------------------------------------------------------------------------------

]# yum install ceph-deploy

]# su - ceph

]$ mkdir my-cluster

]$ cd my-cluster

]$ ceph-deploy new ceph-node-001 ceph-node-002 ceph-node-003

]$ vi ceph.conf

+ public_network = 192.168.122.0/24

+ cluster_network = 192.168.122.0/24

]$ ceph-deploy install ceph-node-001 ceph-node-002 ceph-node-003 ceph-node-admin

]$ ceph-deploy mon create-initial

]$ ceph-deploy admin ceph-node-001 ceph-node-002 ceph-node-003

]$ exit

ceph-node-admin, ceph-node-001 ~ ceph-node-003

]# chmod +r /etc/ceph/ceph.client.admin.keyring

]# su - ceph

]$ ceph health detail

--------------------------------------------------------------------------------------------------------------


]# yum install ceph-deploy
]# mkdir my-cluster
]# cd my-cluster

]# ceph-deploy new ceph-node-001 ceph-node-002 ceph-node-003

]# vi ceph.conf

+ public_network = 192.168.122.0/24

+ cluster_network = 192.168.122.0/24

+ osd pool default size = 2
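
osd pool default size = 2 keeps two replicas of each object instead of the default three, which fits this three-OSD test cluster. Once the monitors and OSDs are up, the setting can be checked against the default rbd pool (created automatically in Jewel) roughly like this:

]# ceph osd pool get rbd size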

]# ceph-deploy install ceph-node-admin ceph-node-001 ceph-node-002 ceph-node-003

]# ceph-deploy mon create-initial

]# ceph-deploy osd create ceph-node-001:vdb --zap

]# ceph-deploy osd create ceph-node-002:vdb --zap

]# ceph-deploy osd create ceph-node-003:vdb --zap

]# ceph health detail

]# ceph osd tree

ID WEIGHT  TYPE NAME              UP/DOWN REWEIGHT PRIMARY-AFFINITY 

-1 0.04376 root default                                             

-2 0.01459     host ceph-node-001                                   

 0 0.01459         osd.0               up  1.00000          1.00000 

-3 0.01459     host ceph-node-002                                   

 1 0.01459         osd.1               up  1.00000          1.00000 

-4 0.01459     host ceph-node-003                                   

 2 0.01459         osd.2               up  1.00000          1.00000 


4. Tasks on ceph-node-001

]# rbd create vm001 --size 20000 --image-feature layering  

]# rbd ls

vm001

]# rbd info vm001

rbd image 'vm001':

        size 20000 MB in 5000 objects

        order 22 (4096 kB objects)

        block_name_prefix: rbd_data.10332ae8944a

        format: 2

        features: layering

        flags: 

]# ceph auth get-or-create client.vmimages mon 'allow r' osd 'allow rwx pool=rbd'

[client.vmimages]

        key = AQBBu4FYTDZuKBAAXnlDNpSLzkwmYo84u0I9oQ==
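
If only the key string is needed later (for example for virsh secret-set-value in step 5), or the keyring has to be copied to a client host, these should do the job:

]# ceph auth get-key client.vmimages

]# ceph auth get client.vmimages -o /etc/ceph/ceph.client.vmimages.keyring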


]# ceph auth list

installed auth entries:


osd.0

        key: AQAzuIFYPSfQIBAASzGIUb+VQKk3c48Zg5bu9Q==

        caps: [mon] allow profile osd

        caps: [osd] allow *

osd.1

        key: AQBQuIFYXS8LGBAA7qUoQwbIaEnYTboeJYrUgw==

        caps: [mon] allow profile osd

        caps: [osd] allow *

osd.2

        key: AQBmuIFYSSbJFxAAxgN1NWiJ7SOJvMl1nZll8w==

        caps: [mon] allow profile osd

        caps: [osd] allow *

client.admin

        key: AQAjtoFY6S21MhAA71nAPC3LGIXOGYi9lAB5pg==

        caps: [mds] allow *

        caps: [mon] allow *

        caps: [osd] allow *

client.bootstrap-mds

        key: AQAltoFYOnCBMhAAQ+IsKhiRQXMEj6y9nRkTmg==

        caps: [mon] allow profile bootstrap-mds

client.bootstrap-osd

        key: AQAktoFYqpAVHBAA9D4xX+DannHKJumh4LWrGQ==

        caps: [mon] allow profile bootstrap-osd

client.bootstrap-rgw

        key: AQAltoFYzZj3DRAA2a8pbGitRlaQH31z/pdTgQ==

        caps: [mon] allow profile bootstrap-rgw

client.vmimages

        key: AQBBu4FYTDZuKBAAXnlDNpSLzkwmYo84u0I9oQ==

        caps: [mon] allow r

        caps: [osd] allow rwx pool=rbd


5. Tasks on the KVM host

]# echo "<secret ephemeral='no' private='no'> <usage type='ceph'> <name>client.vmimages secret</name></usage></secret>" > secret.xml
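
The UUID used below is the one libvirt assigns when the secret is defined; that step appears to be missing here and would look like this (the UUID it prints, 92447fe3-b22f-4e88-b07a-01ab839664d8 in this case, is the one referenced by secret-set-value and the XML that follows):

]# virsh secret-define --file secret.xml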

]# virsh secret-set-value 92447fe3-b22f-4e88-b07a-01ab839664d8 AQBBu4FYTDZuKBAAXnlDNpSLzkwmYo84u0I9oQ==

]# vi ceph-pool.xml

+<pool type="rbd">

+   <name>ceph-pool</name>

+  <source>

+     <name>rbd</name>

+     <host name="192.168.122.12" port="6789" />

+     <auth username='vmimages' type='ceph'>

+       <secret uuid='92447fe3-b22f-4e88-b07a-01ab839664d8'/>

+     </auth>

+   </source>

+</pool>
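
The XML alone does nothing until libvirt registers and starts the pool; assuming the file above, the usual sequence would be:

]# virsh pool-define ceph-pool.xml

]# virsh pool-start ceph-pool

]# virsh pool-autostart ceph-pool

]# virsh vol-list ceph-pool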


]# vi ceph-client.xml

+    <disk type='network' device='disk'>

+      <driver name='qemu' type='raw' cache='none'/>

+      <auth username='vmimages'>

+        <secret type='ceph' uuid='92447fe3-b22f-4e88-b07a-01ab839664d8'/>

+      </auth>

+      <source protocol='rbd' name='rbd/vm002'>

+        <host name='192.168.122.12' port='6789'/>

+      </source>

+      <target dev='vdb' bus='virtio'/>

+      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>

+    </disk>
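
This disk snippet can either be pasted into the guest definition via virsh edit, or attached with attach-device; the domain name below (vm-guest) is only a placeholder:

]# virsh attach-device vm-guest ceph-client.xml --persistent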


An RBD image created with virt-manager can be used, but an RBD image created with the command below, as shown in the Ceph documentation, fails with an error saying its header cannot be read.

qemu-img create -f rbd rbd:rbd/vm003 20G


vm001 and vm002 were created with virt-manager; vm003 was created with the command above.


[root@ceph-node-001 home]# rbd ls

vm001

vm002

vm003

[root@ceph-node-001 home]# rbd info vm001

rbd image 'vm001':

        size 20480 MB in 5120 objects

        order 22 (4096 kB objects)

        block_name_prefix: rbd_data.116d116ae494

        format: 2

        features: layering, striping

        flags: 

        stripe unit: 4096 kB

        stripe count: 1

[root@ceph-node-001 home]# rbd info vm002

rbd image 'vm002':

        size 20480 MB in 5120 objects

        order 22 (4096 kB objects)

        block_name_prefix: rbd_data.10e0684a481a

        format: 2

        features: layering, striping

        flags: 

        stripe unit: 4096 kB

        stripe count: 1

[root@ceph-node-001 home]# rbd info vm003

rbd image 'vm003':

        size 20480 MB in 5120 objects

        order 22 (4096 kB objects)

        block_name_prefix: rbd_data.118274b0dc51

        format: 2

        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten

        flags: 


The cause turned out to be the RBD image features...

]# rbd create vm004 --size 20000 --image-feature layering

Creating the image this way and then mounting it works fine.

Need to dig up the documentation on these image features.
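
For reference, the extra features on vm003 (exclusive-lock, object-map, fast-diff, deep-flatten) are the usual suspects when an older librbd/krbd client cannot open an image. Instead of recreating the image, it should also be possible to strip them from the existing one:

]# rbd feature disable vm003 deep-flatten fast-diff object-map exclusive-lock

Alternatively, setting rbd default features = 1 in ceph.conf (bitmask 1 = layering only) is commonly used so that newly created images get only the layering feature by default.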
