Reference sites:

https://orebibou.com/2016/03/centos-7%E3%81%AB%E5%88%86%E6%95%A3%E3%82%AA%E3%83%96%E3%82%B8%E3%82%A7%E3%82%AF%E3%83%88%E3%82%B9%E3%83%88%E3%83%AC%E3%83%BC%E3%82%B8%E3%80%8Eceph%E3%80%8F%E3%82%92%E3%82%A4%E3%83%B3%E3%82%B9/

http://docs.ceph.com/docs/master/start/quick-ceph-deploy/

http://www.nminoru.jp/~nminoru/unix/ceph/how-to-use-rbd.html

https://www.redhat.com/archives/virt-tools-list/2016-January/msg00007.html


1. KVM virtual server setup

192.168.122.11/ceph-node-admin/CentOS Linux release 7.3.1611 (Core)

192.168.122.12/ceph-node-001/CentOS Linux release 7.3.1611 (Core) 

192.168.122.13/ceph-node-002/CentOS Linux release 7.3.1611 (Core)

192.168.122.14/ceph-node-003/CentOS Linux release 7.3.1611 (Core)


2. Common tasks on ceph-node-admin and ceph-node-001 ~ 003


]# yum update

]# yum install net-tools


""" 시간동기화툴 설치 및 시작 """

]# yum install chrony

]# systemctl enable chronyd

]# systemctl start chronyd


""" SELINUX 비활성화"""

]# vi /etc/selinux/config

- SELINUX=enforcing

+ SELINUX=disabled
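
The config change only takes effect after a reboot. To stop enforcing immediately without rebooting, the following can also be run (a reboot is done at the end of this section anyway):

]# setenforce 0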


""" firewalld 종료 """

]# systemctl stop firewalld

]# systemctl disable firewalld


""" hostname 변경""

ceph-node-admin

]# hostnamectl set-hostname ceph-node-admin


ceph-node-001

]# hostnamectl set-hostname ceph-node-001


ceph-node-002

]# hostnamectl set-hostname ceph-node-002


ceph-node-003

]# hostnamectl set-hostname ceph-node-003


""" NetworkManager 종료 """

]# systemctl stop NetworkManager

]# systemctl disable NetworkManager


""" /etc/hosts 수정 """

]# vi /etc/hosts

+ 192.168.122.11 ceph-node-admin

+ 192.168.122.12 ceph-node-001

+ 192.168.122.13 ceph-node-002

+ 192.168.122.14 ceph-node-003
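
A quick sanity check that the names resolve as expected (optional):

]# getent hosts ceph-node-001
192.168.122.12  ceph-node-001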


""" ceph 계정생성 """

ceph-node-001~003

]# useradd -d /home/ceph -m ceph

]# passwd ceph

]# echo -e 'Defaults:ceph !requiretty\nceph ALL = (root) NOPASSWD:ALL' | tee /etc/sudoers.d/ceph

]# chmod 440 /etc/sudoers.d/ceph
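
To confirm the sudoers entry works, a passwordless sudo check like this should print root without prompting:

]# su - ceph -c 'sudo whoami'
root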


ceph-node-admin

--------------------------------------------------------------------------------------------------------------

Is there really a need to create a separate ceph user on the admin node? Let's just use root instead.

]# useradd -d /home/ceph -m ceph

]# passwd ceph

]# echo -e 'Defaults:ceph !requiretty\nceph ALL = (root) NOPASSWD:ALL' | tee /etc/sudoers.d/ceph

]# chmod 440 /etc/sudoers.d/ceph

]# su - ceph

--------------------------------------------------------------------------------------------------------------


]# vi /root/.ssh/config

+ Host ceph-node-001

+    Hostname ceph-node-001

+    User ceph

+ Host ceph-node-002

+    Hostname ceph-node-002

+    User ceph

+ Host ceph-node-003

+    Hostname ceph-node-003

+    User ceph


]# ssh-keygen

]# ssh-copy-id ceph-node-001

]# ssh-copy-id ceph-node-002

]# ssh-copy-id ceph-node-003 
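
With the ~/.ssh/config above, ssh (and later ceph-deploy) logs in to each node as the ceph user without a password. A quick check:

]# ssh ceph-node-001 hostname
ceph-node-001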


""" ceph repo 등록 ""

]# yum install http://download.ceph.com/rpm-jewel/el7/noarch/ceph-release-1-1.el7.noarch.rpm


""" 재시작 """

]# reboot


3. Work on ceph-node-admin

--------------------------------------------------------------------------------------------------------------

]# yum install ceph-deploy

]# su - ceph

]$ mkdir my-cluster

]$ cd my-cluster

]$ ceph-deploy new ceph-node-001 ceph-node-002 ceph-node-003

]$ vi ceph.conf

+ public_network = 192.168.122.0/24

+ cluster_network = 192.168.122.0/24

]$ ceph-deploy install ceph-node-001 ceph-node-002 ceph-node-003 ceph-node-admin

]$ ceph-deploy mon create-initial

]$ ceph-deploy admin ceph-node-001 ceph-node-002 ceph-node-003

]$ exit

ceph-node-admin, ceph-node-001 ~ ceph-node-003

]# chmod +r /etc/ceph/ceph.client.admin.keyring

]# su - ceph

]$ ceph health detail

--------------------------------------------------------------------------------------------------------------


]# yum install ceph-deploy
]# mkdir my-cluster
]# cd my-cluster

]# ceph-deploy new ceph-node-001 ceph-node-002 ceph-node-003

]# vi ceph.conf

+ public_network = 192.168.122.0/24

+ cluster_network = 192.168.122.0/24

+ osd pool default size = 2

]# ceph-deploy install ceph-node-admin ceph-node-001 ceph-node-002 ceph-node-003

]# ceph-deploy mon create-initial

]# ceph-deploy osd create ceph-node-001:vdb --zap

]# ceph-deploy osd create ceph-node-002:vdb --zap

]# ceph-deploy osd create ceph-node-003:vdb --zap

]# ceph health detail
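
For a broader view of the cluster (monitors, OSD count, PG states), ceph -s can also be used:

]# ceph -s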

]# ceph osd tree;                 

ID WEIGHT  TYPE NAME              UP/DOWN REWEIGHT PRIMARY-AFFINITY 

-1 0.04376 root default                                             

-2 0.01459     host ceph-node-001                                   

 0 0.01459         osd.0               up  1.00000          1.00000 

-3 0.01459     host ceph-node-002                                   

 1 0.01459         osd.1               up  1.00000          1.00000 

-4 0.01459     host ceph-node-003                                   

 2 0.01459         osd.2               up  1.00000          1.00000 


4. Work on ceph-node-001

]# rbd create vm001 --size 20000 --image-feature layering  

]# rbd ls

vm001

]# rbd info vm001

rbd image 'vm001':

        size 20000 MB in 5000 objects

        order 22 (4096 kB objects)

        block_name_prefix: rbd_data.10332ae8944a

        format: 2

        features: layering

        flags: 

]# ceph auth get-or-create client.vmimages mon 'allow r' osd 'allow rwx pool=rbd'

[client.vmimages]

        key = AQBBu4FYTDZuKBAAXnlDNpSLzkwmYo84u0I9oQ==
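
The key can be read back later if needed, e.g. when configuring the libvirt secret on the KVM host:

]# ceph auth get-key client.vmimages
AQBBu4FYTDZuKBAAXnlDNpSLzkwmYo84u0I9oQ==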


]# ceph auth list

installed auth entries:


osd.0

        key: AQAzuIFYPSfQIBAASzGIUb+VQKk3c48Zg5bu9Q==

        caps: [mon] allow profile osd

        caps: [osd] allow *

osd.1

        key: AQBQuIFYXS8LGBAA7qUoQwbIaEnYTboeJYrUgw==

        caps: [mon] allow profile osd

        caps: [osd] allow *

osd.2

        key: AQBmuIFYSSbJFxAAxgN1NWiJ7SOJvMl1nZll8w==

        caps: [mon] allow profile osd

        caps: [osd] allow *

client.admin

        key: AQAjtoFY6S21MhAA71nAPC3LGIXOGYi9lAB5pg==

        caps: [mds] allow *

        caps: [mon] allow *

        caps: [osd] allow *

client.bootstrap-mds

        key: AQAltoFYOnCBMhAAQ+IsKhiRQXMEj6y9nRkTmg==

        caps: [mon] allow profile bootstrap-mds

client.bootstrap-osd

        key: AQAktoFYqpAVHBAA9D4xX+DannHKJumh4LWrGQ==

        caps: [mon] allow profile bootstrap-osd

client.bootstrap-rgw

        key: AQAltoFYzZj3DRAA2a8pbGitRlaQH31z/pdTgQ==

        caps: [mon] allow profile bootstrap-rgw

client.vmimages

        key: AQBBu4FYTDZuKBAAXnlDNpSLzkwmYo84u0I9oQ==

        caps: [mon] allow r

        caps: [osd] allow rwx pool=rbd


5. Work on the KVM host

]# echo "<secret ephemeral='no' private='no'> <usage type='ceph'> <name>client.vmimages secret</name></usage></secret>" > secret.xml
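
The UUID used below comes from registering the secret with libvirt first; presumably a define step like this was run (libvirt prints the generated UUID):

]# virsh secret-define --file secret.xml
Secret 92447fe3-b22f-4e88-b07a-01ab839664d8 created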

]# virsh secret-set-value 92447fe3-b22f-4e88-b07a-01ab839664d8 AQBBu4FYTDZuKBAAXnlDNpSLzkwmYo84u0I9oQ==

]# vi ceph-pool.xml

+<pool type="rbd">

+   <name>ceph-pool</name>

+  <source>

+     <name>rbd</name>

+     <host name="192.168.122.12" port="6789" />

+     <auth username='vmimages' type='ceph'>

+       <secret uuid='92447fe3-b22f-4e88-b07a-01ab839664d8'/>

+     </auth>

+   </source>

+</pool>
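
The pool definition then needs to be registered and started in libvirt, presumably along these lines:

]# virsh pool-define ceph-pool.xml
]# virsh pool-start ceph-pool
]# virsh pool-autostart ceph-pool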


]# vi ceph-client.xml

+    <disk type='network' device='disk'>

+      <driver name='qemu' type='raw' cache='none'/>

+      <auth username='vmimages'>

+        <secret type='ceph' uuid='92447fe3-b22f-4e88-b07a-01ab839664d8'/>

+      </auth>

+      <source protocol='rbd' name='rbd/vm002'>

+        <host name='192.168.122.12' port='6789'/>

+      </source>

+      <target dev='vdb' bus='virtio'/>

+      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>

+    </disk>
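
This <disk> element goes into the guest's domain XML. Assuming a guest named vm-guest (hypothetical name), it can be added by editing the domain or by attaching the saved snippet:

]# virsh edit vm-guest                                        # paste the <disk> block into <devices>, or:
]# virsh attach-device vm-guest ceph-client.xml --persistent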


RBD images created through virt-manager work fine, but an RBD image created with the command below, as shown in the Ceph docs, fails with an error saying the header cannot be read.

qemu-img create -f rbd rbd:rbd/vm003 20G


vm001 and vm002 were created with virt-manager; vm003 was created with the command above.


[root@ceph-node-001 home]# rbd ls

vm001

vm002

vm003

[root@ceph-node-001 home]# rbd info vm001

rbd image 'vm001':

        size 20480 MB in 5120 objects

        order 22 (4096 kB objects)

        block_name_prefix: rbd_data.116d116ae494

        format: 2

        features: layering, striping

        flags: 

        stripe unit: 4096 kB

        stripe count: 1

[root@ceph-node-001 home]# rbd info vm002

rbd image 'vm002':

        size 20480 MB in 5120 objects

        order 22 (4096 kB objects)

        block_name_prefix: rbd_data.10e0684a481a

        format: 2

        features: layering, striping

        flags: 

        stripe unit: 4096 kB

        stripe count: 1

[root@ceph-node-001 home]# rbd info vm003

rbd image 'vm003':

        size 20480 MB in 5120 objects

        order 22 (4096 kB objects)

        block_name_prefix: rbd_data.118274b0dc51

        format: 2

        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten

        flags: 


The cause turned out to be the RBD image features...

]# rbd create vm004 --size 20000 --image-feature layering

Creating the image this way and mounting it works correctly.

Need to look up the documentation on RBD image features.
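
For an image that was already created with the extra features, such as vm003, it should also be possible to strip them instead of recreating the image (whether deep-flatten can be removed after creation may depend on the Ceph version):

]# rbd feature disable vm003 exclusive-lock object-map fast-diff deep-flatten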

LVM2

Reference: http://kit2013.tistory.com/199


# dd sparse image

]# dd if=/dev/zero of=test1.img bs=10M count=0 seek=1K 

]# dd if=/dev/zero of=test2.img bs=10M count=0 seek=1K
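
count=0 with seek=1K at bs=10M just sets the file size to 10 GiB without writing data, so the files are fully sparse. truncate would give the same result, e.g.:

]# truncate -s 10G test1.img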


]# ls -alh

-rw-r--r--   1 root  root   10G Jan 12 14:28 test1.img

-rw-r--r--   1 root  root   10G Jan 12 14:28 test2.img


]# du -sh *

0       test1.img

0       test2.img


# losetup 

]# losetup -a

/dev/loop0: [2051]:1316303 (/opt/stack/data/stack-volumes-default-backing-file)

/dev/loop1: [2051]:1316304 (/opt/stack/data/stack-volumes-lvmdriver-1-backing-file)


]# losetup /dev/loop2 test1.img 

[root@localhost home]# losetup -a

/dev/loop0: [2051]:1316303 (/opt/stack/data/stack-volumes-default-backing-file)

/dev/loop1: [2051]:1316304 (/opt/stack/data/stack-volumes-lvmdriver-1-backing-file)

/dev/loop2: [2050]:12 (/home/test1.img)


]# pvscan 

  PV /dev/loop1   VG stack-volumes-lvmdriver-1   lvm2 [10.01 GiB / 8.01 GiB free]

  PV /dev/loop0   VG stack-volumes-default       lvm2 [10.01 GiB / 10.01 GiB free]

  Total: 2 [20.02 GiB] / in use: 2 [20.02 GiB] / in no VG: 0 [0   ]


]# pvcreate /dev/loop2

  Physical volume "/dev/loop2" successfully created.

[root@localhost home]# pvscan 

  PV /dev/loop1   VG stack-volumes-lvmdriver-1   lvm2 [10.01 GiB / 8.01 GiB free]

  PV /dev/loop0   VG stack-volumes-default       lvm2 [10.01 GiB / 10.01 GiB free]

  PV /dev/loop2                                  lvm2 [10.00 GiB]

  Total: 3 [30.02 GiB] / in use: 2 [20.02 GiB] / in no VG: 1 [10.00 GiB]


]# vgscan 

  Reading volume groups from cache.

  Found volume group "stack-volumes-lvmdriver-1" using metadata type lvm2

  Found volume group "stack-volumes-default" using metadata type lvm2

[root@localhost home]# vgdisplay

  --- Volume group ---

  VG Name               stack-volumes-lvmdriver-1

  System ID             

  Format                lvm2

  Metadata Areas        1

  Metadata Sequence No  7

  VG Access             read/write

  VG Status             resizable

  MAX LV                0

  Cur LV                2

  Open LV               1

  Max PV                0

  Cur PV                1

  Act PV                1

  VG Size               10.01 GiB

  PE Size               4.00 MiB

  Total PE              2562

  Alloc PE / Size       512 / 2.00 GiB

  Free  PE / Size       2050 / 8.01 GiB

  VG UUID               Evn3J0-O09c-9dei-2eKI-Y8Yl-lwhF-FqvTlb

   

  --- Volume group ---

  VG Name               stack-volumes-default

  System ID             

  Format                lvm2

  Metadata Areas        1

  Metadata Sequence No  1

  VG Access             read/write

  VG Status             resizable

  MAX LV                0

  Cur LV                0

  Open LV               0

  Max PV                0

  Cur PV                1

  Act PV                1

  VG Size               10.01 GiB

  PE Size               4.00 MiB

  Total PE              2562

  Alloc PE / Size       0 / 0   

  Free  PE / Size       2562 / 10.01 GiB

  VG UUID               jsQjQe-wiwI-1ZA2-lvBY-flvp-Bn5G-CGJrb1


]# vgcreate vg1 /dev/loop2

  Volume group "vg1" successfully created

[root@localhost home]# vgscan                 

  Reading volume groups from cache.

  Found volume group "stack-volumes-lvmdriver-1" using metadata type lvm2

  Found volume group "vg1" using metadata type lvm2

  Found volume group "stack-volumes-default" using metadata type lvm2

[root@localhost home]# vgdisplay              

  --- Volume group ---

  VG Name               stack-volumes-lvmdriver-1

  System ID             

  Format                lvm2

  Metadata Areas        1

  Metadata Sequence No  7

  VG Access             read/write

  VG Status             resizable

  MAX LV                0

  Cur LV                2

  Open LV               1

  Max PV                0

  Cur PV                1

  Act PV                1

  VG Size               10.01 GiB

  PE Size               4.00 MiB

  Total PE              2562

  Alloc PE / Size       512 / 2.00 GiB

  Free  PE / Size       2050 / 8.01 GiB

  VG UUID               Evn3J0-O09c-9dei-2eKI-Y8Yl-lwhF-FqvTlb

   

  --- Volume group ---

  VG Name               vg1

  System ID             

  Format                lvm2

  Metadata Areas        1

  Metadata Sequence No  1

  VG Access             read/write

  VG Status             resizable

  MAX LV                0

  Cur LV                0

  Open LV               0

  Max PV                0

  Cur PV                1

  Act PV                1

  VG Size               10.00 GiB

  PE Size               4.00 MiB

  Total PE              2559

  Alloc PE / Size       0 / 0   

  Free  PE / Size       2559 / 10.00 GiB

  VG UUID               qCfSGd-ymgV-DWJz-p35j-ZHNr-gCIK-Pgcaw3

   

  --- Volume group ---

  VG Name               stack-volumes-default

  System ID             

  Format                lvm2

  Metadata Areas        1

  Metadata Sequence No  1

  VG Access             read/write

  VG Status             resizable

  MAX LV                0

  Cur LV                0

  Open LV               0

  Max PV                0

  Cur PV                1

  Act PV                1

  VG Size               10.01 GiB

  PE Size               4.00 MiB

  Total PE              2562

  Alloc PE / Size       0 / 0   

  Free  PE / Size       2562 / 10.01 GiB

  VG UUID               jsQjQe-wiwI-1ZA2-lvBY-flvp-Bn5G-CGJrb1


]# lvcreate -n vg1_lo1 -L 1G vg1  

  Logical volume "vg1_lo1" created.

[root@localhost home]# vgdisplay vg1

  --- Volume group ---

  VG Name               vg1

  System ID             

  Format                lvm2

  Metadata Areas        1

  Metadata Sequence No  2

  VG Access             read/write

  VG Status             resizable

  MAX LV                0

  Cur LV                1

  Open LV               0

  Max PV                0

  Cur PV                1

  Act PV                1

  VG Size               10.00 GiB

  PE Size               4.00 MiB

  Total PE              2559

  Alloc PE / Size       256 / 1.00 GiB

  Free  PE / Size       2303 / 9.00 GiB

  VG UUID               qCfSGd-ymgV-DWJz-p35j-ZHNr-gCIK-Pgcaw3


]# fdisk  -l


Disk /dev/mapper/vg1-vg1_lo1: 1073 MB, 1073741824 bytes, 2097152 sectors

Units = sectors of 1 * 512 = 512 bytes

Sector size (logical/physical): 512 bytes / 512 bytes

I/O size (minimum/optimal): 512 bytes / 512 bytes



## add 

]# losetup /dev/loop3 test2.img 

[root@localhost home]# losetup -a

/dev/loop0: [2051]:1316303 (/opt/stack/data/stack-volumes-default-backing-file)

/dev/loop1: [2051]:1316304 (/opt/stack/data/stack-volumes-lvmdriver-1-backing-file)

/dev/loop2: [2050]:12 (/home/test1.img)

/dev/loop3: [2050]:13 (/home/test2.img)


]# pvcreate /dev/loop3

  WARNING: Not using lvmetad because duplicate PVs were found.

  WARNING: Use multipath or vgimportclone to resolve duplicate PVs?

  WARNING: After duplicates are resolved, run "pvscan --cache" to enable lvmetad.

  Physical volume "/dev/loop3" successfully created.


]# pvdisplay 

  WARNING: Not using lvmetad because duplicate PVs were found.

  WARNING: Use multipath or vgimportclone to resolve duplicate PVs?

  WARNING: After duplicates are resolved, run "pvscan --cache" to enable lvmetad.

  --- Physical volume ---

  PV Name               /dev/loop2

  VG Name               vg1

  PV Size               10.00 GiB / not usable 4.00 MiB

  Allocatable           yes 

  PE Size               4.00 MiB

  Total PE              2559

  Free PE               2303

  Allocated PE          256

  PV UUID               9l5ff5-zqk7-qH8n-IEx9-tgjG-2Cjg-CT2L41

   

  --- Physical volume ---

  PV Name               /dev/loop1

  VG Name               stack-volumes-lvmdriver-1

  PV Size               10.01 GiB / not usable 2.00 MiB

  Allocatable           yes 

  PE Size               4.00 MiB

  Total PE              2562

  Free PE               2050

  Allocated PE          512

  PV UUID               DVNkcA-Syfm-5du0-0XFe-QnIE-D5zi-9h23wK

   

  --- Physical volume ---

  PV Name               /dev/loop0

  VG Name               stack-volumes-default

  PV Size               10.01 GiB / not usable 2.00 MiB

  Allocatable           yes 

  PE Size               4.00 MiB

  Total PE              2562

  Free PE               2562

  Allocated PE          0

  PV UUID               TOdLHi-diqH-I9eb-eJ6A-jv3P-YXUx-geAIwx

   

  "/dev/loop3" is a new physical volume of "10.00 GiB"

  --- NEW Physical volume ---

  PV Name               /dev/loop3

  VG Name               

  PV Size               10.00 GiB

  Allocatable           NO

  PE Size               0   

  Total PE              0

  Free PE               0

  Allocated PE          0

  PV UUID               PL6niV-hc1Y-JLTz-zx7k-8rlL-WHIf-8ekmvg


]# vgextend vg1 /dev/loop3 

  WARNING: Not using lvmetad because duplicate PVs were found.

  WARNING: Use multipath or vgimportclone to resolve duplicate PVs?

  WARNING: After duplicates are resolved, run "pvscan --cache" to enable lvmetad.

  Volume group "vg1" successfully extended


]# vgdisplay  vg1

  WARNING: Not using lvmetad because duplicate PVs were found.

  WARNING: Use multipath or vgimportclone to resolve duplicate PVs?

  WARNING: After duplicates are resolved, run "pvscan --cache" to enable lvmetad.

  --- Volume group ---

  VG Name               vg1

  System ID             

  Format                lvm2

  Metadata Areas        2

  Metadata Sequence No  3

  VG Access             read/write

  VG Status             resizable

  MAX LV                0

  Cur LV                1

  Open LV               0

  Max PV                0

  Cur PV                2

  Act PV                2

  VG Size               19.99 GiB

  PE Size               4.00 MiB

  Total PE              5118

  Alloc PE / Size       256 / 1.00 GiB

  Free  PE / Size       4862 / 18.99 GiB

  VG UUID               qCfSGd-ymgV-DWJz-p35j-ZHNr-gCIK-Pgcaw3


]# lvextend -L+10G /dev/vg1/vg1_lo1     

  WARNING: Not using lvmetad because duplicate PVs were found.

  WARNING: Use multipath or vgimportclone to resolve duplicate PVs?

  WARNING: After duplicates are resolved, run "pvscan --cache" to enable lvmetad.

  Size of logical volume vg1/vg1_lo1 changed from 1.00 GiB (256 extents) to 11.00 GiB (2816 extents).

  Logical volume vg1/vg1_lo1 successfully resized.


]# fdisk -l /dev/mapper/vg1-vg1_lo1


Disk /dev/mapper/vg1-vg1_lo1: 11.8 GB, 11811160064 bytes, 23068672 sectors

Units = sectors of 1 * 512 = 512 bytes

Sector size (logical/physical): 512 bytes / 512 bytes

I/O size (minimum/optimal): 512 bytes / 512 bytes
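
If a filesystem had already been created on the LV, it would still need to be grown separately after the lvextend, for example:

]# resize2fs /dev/vg1/vg1_lo1        # ext2/3/4
]# xfs_growfs /mount/point           # xfs (takes the mount point)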


## remove


]# lvremove /dev/vg1/vg1_lo1

  WARNING: Not using lvmetad because duplicate PVs were found.

  WARNING: Use multipath or vgimportclone to resolve duplicate PVs?

  WARNING: After duplicates are resolved, run "pvscan --cache" to enable lvmetad.

Do you really want to remove active logical volume vg1/vg1_lo1? [y/n]: y

  Logical volume "vg1_lo1" successfully removed


]# vgremove vg1

  WARNING: Not using lvmetad because duplicate PVs were found.

  WARNING: Use multipath or vgimportclone to resolve duplicate PVs?

  WARNING: After duplicates are resolved, run "pvscan --cache" to enable lvmetad.

  Volume group "vg1" successfully removed


]# pvremove /dev/loop2 /dev/loop3

  WARNING: Not using lvmetad because duplicate PVs were found.

  WARNING: Use multipath or vgimportclone to resolve duplicate PVs?

  WARNING: After duplicates are resolved, run "pvscan --cache" to enable lvmetad.

  Labels on physical volume "/dev/loop2" successfully wiped.

  Labels on physical volume "/dev/loop3" successfully wiped.


]# losetup -d /dev/loop2

[root@localhost home]# losetup -d /dev/loop3

[root@localhost home]# losetup -a

/dev/loop0: [2051]:1316303 (/opt/stack/data/stack-volumes-default-backing-file)

/dev/loop1: [2051]:1316304 (/opt/stack/data/stack-volumes-lvmdriver-1-backing-file)


kvm install

os: centos7 minimal


partition:

   Device Boot      Start         End      Blocks   Id  System

/dev/sda1   *        2048      976895      487424   83  Linux

/dev/sda2          976896    98631679    48827392   83  Linux

/dev/sda3        98631680   129880063    15624192   82  Linux swap / Solaris

/dev/sda4       129880064   488396799   179258368    5  Extended

/dev/sda5       129882112   488388607   179253248   83  Linux


network:

]# vi /etc/sysconfig/network-scripts/ifcfg-enp0s25

TYPE=Ethernet

BOOTPROTO=static

DEFROUTE=yes

PEERDNS=yes

PEERROUTES=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_PEERDNS=yes

IPV6_PEERROUTES=yes

IPV6_FAILURE_FATAL=no

NAME=enp0s25

UUID=f5aad67b-0b2e-46b9-b17d-126c9eaae4b1

DEVICE=enp0s25

ONBOOT=yes

IPADDR=123.140.248.88

NETMASK=255.255.255.0

GATEWAY=123.140.248.254


]# vi /etc/resolv.conf

search .

nameserver 210.220.163.82


selinux:

]# vi /etc/selinux/config

SELINUX=disabled


update:

]# yum update

]# yum install net-tools

]# reboot


kvm install:

# install Xming on the PC (X server for running virt-manager over SSH X11 forwarding)

https://sourceforge.net/projects/xming/


]# yum install qemu-kvm qemu-kvm-tools libvirt virt-install virt-manager virt-viewer virt-top dejavu-lgc-sans-fonts xorg-x11-xauth wget vim

]# systemctl start libvirtd

]# systemctl enable libvirtd

]# export NO_AT_BRIDGE=1

]# alias vi=vim

]# setterm -blength 0

]# mkdir /home/isos

]# cd /home/isos

]# wget http://ftp.daumkakao.com/centos/7/isos/x86_64/CentOS-7-x86_64-Minimal-1611.iso 

]# virsh net-destroy default

]# virsh net-autostart default --disable

]# mkdir /root/rpms

]# cd /root/rpms

]# wget https://rdo.fedorapeople.org/openstack/openstack-kilo/rdo-release-kilo.rpm

]# rpm -Uvh rdo-release-kilo.rpm

]# yum install openvswitch

]# systemctl start openvswitch

]# systemctl enable openvswitch

]# vi /etc/sysconfig/network-scripts/ifcfg-ovsbr0

DEVICE=ovsbr0

ONBOOT=yes

DEVICETYPE=ovs

TYPE=OVSBridge

BOOTPROTO=static

IPADDR=192.168.100.1

NETMASK=255.255.255.0

HOTPLUG=no

ZONE=trusted


]# ifup ovsbr0

]# iptables -A POSTROUTING -s 192.168.100.0/24 -t nat -j MASQUERADE
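
This NAT rule is not persistent across reboots; assuming the iptables-services package is used to restore rules at boot, it could be saved like this:

]# iptables-save > /etc/sysconfig/iptables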


]# vi /etc/libvirt/qemu/networks/public.xml

<network>

  <name>public</name>

  <forward mode='bridge'/>

  <bridge name='ovsbr0'/>

  <virtualport type='openvswitch'/>

</network>


]# vi /etc/libvirt/qemu/networks/private.xml

<network>

  <name>private</name>

  <forward mode='nat'/>

  <bridge name='virbr0' stp='on' delay='0'/>

  <mac address='52:54:00:e3:83:e1'/>

  <ip address='192.168.122.1' netmask='255.255.255.0'>

    <dhcp>

      <range start='192.168.122.2' end='192.168.122.254'/>

      <host mac='52:54:00:e5:22:c1' name='test-001' ip='192.168.122.2'/>

      <host mac='52:54:00:e5:22:c2' name='test-002' ip='192.168.122.3'/>

      <host mac='52:54:00:e5:22:c3' name='test-003' ip='192.168.122.4'/>

      <host mac='52:54:00:e5:22:c4' name='test-004' ip='192.168.122.5'/>

    </dhcp>

  </ip>

</network>


]# virsh net-define /etc/libvirt/qemu/networks/public.xml

]# virsh net-define /etc/libvirt/qemu/networks/private.xml

]# virsh net-start public

]# virsh net-start private

]# virsh net-autostart public

]# virsh net-autostart private


]# vi /etc/libvirt/qemu/test-001.xml

]# vi /etc/libvirt/qemu/test-002.xml

]# vi /etc/libvirt/qemu/test-003.xml

]# vi /etc/libvirt/qemu/test-004.xml

<domain type='kvm'>

  <name>test-001</name>

  <memory unit='KiB'>1048576</memory>

  <currentMemory unit='KiB'>1048576</currentMemory>

  <vcpu placement='static'>1</vcpu>

  <os>

    <type arch='x86_64' machine='pc-i440fx-rhel7.0.0'>hvm</type>

    <boot dev='hd'/>

  </os>

  <features>

    <acpi/>

    <apic/>

  </features>

  <cpu mode='custom' match='exact'>

    <model fallback='allow'>Penryn</model>

  </cpu>

  <clock offset='utc'>

    <timer name='rtc' tickpolicy='catchup'/>

    <timer name='pit' tickpolicy='delay'/>

    <timer name='hpet' present='no'/>

  </clock>

  <on_poweroff>destroy</on_poweroff>

  <on_reboot>restart</on_reboot>

  <on_crash>restart</on_crash>

  <pm>

    <suspend-to-mem enabled='no'/>

    <suspend-to-disk enabled='no'/>

  </pm>

  <devices>

    <emulator>/usr/libexec/qemu-kvm</emulator>

    <disk type='file' device='disk'>

      <driver name='qemu' type='raw'/>

      <source file='/var/lib/libvirt/images/test-001.img'/>

      <target dev='vda' bus='virtio'/>

      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>

    </disk>

    <controller type='usb' index='0' model='ich9-ehci1'>

      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x7'/>

    </controller>

    <controller type='usb' index='0' model='ich9-uhci1'>

      <master startport='0'/>

      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0' multifunction='on'/>

    </controller>

    <controller type='usb' index='0' model='ich9-uhci2'>

      <master startport='2'/>

      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x1'/>

    </controller>

    <controller type='usb' index='0' model='ich9-uhci3'>

      <master startport='4'/>

      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x2'/>

    </controller>

    <controller type='pci' index='0' model='pci-root'/>

    <controller type='virtio-serial' index='0'>

      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>

    </controller>

    <interface type='network'>

      <mac address='52:54:00:05:d1:c1'/>

      <source network='public'/>

      <model type='virtio'/>

      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>

    </interface>

    <interface type='network'>

      <mac address='52:54:00:e5:22:c1'/>

      <source network='private'/>

      <model type='virtio'/>

      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>

    </interface>

    <serial type='pty'>

      <target port='0'/>

    </serial>

    <console type='pty'>

      <target type='serial' port='0'/>

    </console>

    <channel type='unix'>

      <target type='virtio' name='org.qemu.guest_agent.0'/>

      <address type='virtio-serial' controller='0' bus='0' port='1'/>

    </channel>

    <channel type='spicevmc'>

      <target type='virtio' name='com.redhat.spice.0'/>

      <address type='virtio-serial' controller='0' bus='0' port='2'/>

    </channel>

    <input type='tablet' bus='usb'>

      <address type='usb' bus='0' port='1'/>

    </input>

    <input type='mouse' bus='ps2'/>

    <input type='keyboard' bus='ps2'/>

    <graphics type='spice' autoport='yes'>

      <listen type='address'/>

      <image compression='off'/>

    </graphics>

    <sound model='ich6'>

      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>

    </sound>

    <video>

      <model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1' primary='yes'/>

      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>

    </video>

    <redirdev bus='usb' type='spicevmc'>

      <address type='usb' bus='0' port='2'/>

    </redirdev>

    <redirdev bus='usb' type='spicevmc'>

      <address type='usb' bus='0' port='3'/>

    </redirdev>

    <memballoon model='virtio'>

      <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>

    </memballoon>

  </devices>

</domain>


]# qemu-img convert test-001.img test-002.img 

]# qemu-img convert test-001.img test-003.img 

]# qemu-img convert test-001.img test-004.img 
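
Since the images are raw, qemu-img convert here is effectively a sparse-aware copy of the base image; a plain sparse cp would presumably also work:

]# cp --sparse=always test-001.img test-002.img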


]# virsh define test-001.xml

]# virsh define test-002.xml

]# virsh define test-003.xml

]# virsh define test-004.xml


]# virsh start test-001

]# virsh start test-002

]# virsh start test-003

]# virsh start test-004
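
The defined and running guests can be checked with:

]# virsh list --all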



test-001~ test-004

]# cat /etc/sysconfig/network-scripts/ifcfg-eth0

DEVICE=eth0

ONBOOT=yes

BOOTPROTO=static

IPADDR=192.168.100.2

NETMASK=255.255.255.0

GATEWAY=192.168.100.1


]# cat /etc/sysconfig/network-scripts/ifcfg-eth1

DEVICE=eth1

BOOTPROTO=dhcp

ONBOOT=yes


]# yum remove NetworkManager


Comment out the make_resolv_conf calls in dhclient-script so DHCP does not overwrite /etc/resolv.conf (diff against the original below):

]# vi /usr/sbin/dhclient-script

506c506

<     make_resolv_conf

---

>     #make_resolv_conf

592c592

<             make_resolv_conf

---

>             #make_resolv_conf

608c608

<                 make_resolv_conf

---

>                 #make_resolv_conf


]# vi /etc/resolv.conf

search .

nameserver 210.220.163.82
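
An alternative to patching dhclient-script would be to make resolv.conf immutable so dhclient cannot rewrite it (just an option, not what was done here):

]# chattr +i /etc/resolv.conf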


etc:

]# ovs-vsctl list-ports ovsbr0

vnet0

vnet2

vnet4

vnet6

[root@localhost qemu]# brctl show

bridge name     bridge id               STP enabled     interfaces

virbr0          8000.525400e383e1       yes             virbr0-nic

                                                        vnet1

                                                        vnet3

                                                        vnet5

                                                        vnet7




http://prolinuxhub.com/configure-start-up-scripts-for-ovs-on-centos-and-red-hat/


[root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-enp0s25

TYPE=OVSPort

BOOTPROTO=none

DEVICE=enp0s25

DEVICETYPE=ovs

ONBOOT=yes

HOTPLUG=no

OVS_BRIDGE=ovsbr0


[root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-ovsbr0

DEVICE=ovsbr0

ONBOOT=yes

DEVICETYPE=ovs

TYPE=OVSBridge

BOOTPROTO=static

IPADDR=123.140.248.88

NETMASK=255.255.255.0

GATEWAY=123.140.248.254

HOTPLUG=no









