root@node-1:~# nova interface-list 95985dfe-8356-4bb6-8ec7-46730d1b4c41
+------------+--------------------------------------+--------------------------------------+----------------+-------------------+
| Port State | Port ID | Net ID | IP addresses | MAC Addr |
+------------+--------------------------------------+--------------------------------------+----------------+-------------------+
| ACTIVE | fc618310-b936-4247-a5e4-2389a9d8c50e | de2df80a-5f69-4630-85ac-4081ce70de98 | 192.168.111.14 | fa:16:3e:5e:99:0c |
+------------+--------------------------------------+--------------------------------------+----------------+-------------------+
root@node-4:~# brctl show
bridge name bridge id STP enabled interfaces
br-aux 8000.e41d2d0f1091 no bond0
p_e52381cd-0
br-fw-admin 8000.3863bb3350c8 no eth2
br-mgmt 8000.e41d2d0f1091 no bond0.101
br-storage 8000.e41d2d0f1091 no bond0.103
qbrfc618310-b9 8000.26d598f1427a no qvbfc618310-b9
tapfc618310-b9
[root@eayun-admin ceph]# ceph-deploy disk list eayun-compute1
[ceph_deploy.cli][INFO ] Invoked (1.3.5): /usr/bin/ceph-deploy disk list eayun-compute1
[eayun-compute1][DEBUG ] connected to host: eayun-compute1
[eayun-compute1][DEBUG ] detect platform information from remote host
[eayun-compute1][DEBUG ] detect machine type
[ceph_deploy.osd][INFO ] Distro info: CentOS 6.5 Final
[ceph_deploy.osd][DEBUG ] Listing disks on eayun-compute1...
[eayun-compute1][INFO ] Running command: ceph-disk list
[eayun-compute1][DEBUG ] /dev/sda :
[eayun-compute1][DEBUG ] /dev/sda1 other, ext4, mounted on /
[eayun-compute1][DEBUG ] /dev/sda2 other, swap
[eayun-compute1][DEBUG ] /dev/sdb other, unknown
[eayun-compute1][DEBUG ] /dev/sdc other, unknown
[root@eayun-admin ceph]# ceph-deploy disk zap eayun-compute1:/dev/sdb
[ceph_deploy.cli][INFO ] Invoked (1.3.5): /usr/bin/ceph-deploy disk zap eayun-compute1:/dev/sdb
[ceph_deploy.osd][DEBUG ] zapping /dev/sdb on eayun-compute1
[eayun-compute1][DEBUG ] connected to host: eayun-compute1
[eayun-compute1][DEBUG ] detect platform information from remote host
[eayun-compute1][DEBUG ] detect machine type
[ceph_deploy.osd][INFO ] Distro info: CentOS 6.5 Final
[eayun-compute1][DEBUG ] zeroing last few blocks of device
[eayun-compute1][INFO ] Running command: sgdisk --zap-all --clear --mbrtogpt -- /dev/sdb
[eayun-compute1][DEBUG ] Creating new GPT entries.
[eayun-compute1][DEBUG ] GPT data structures destroyed! You may now partition the disk using fdisk or
[eayun-compute1][DEBUG ] other utilities.
[eayun-compute1][DEBUG ] The operation has completed successfully.
[root@eayun-admin ceph]# ceph-deploy disk prepare eayun-compute1:/dev/sdb
error: Failed to start domain instance-00000003
error: internal error Process exited while reading console log output: char device redirected to /dev/pts/0
qemu-kvm: -drive file=rbd:volumes/volume-59d0b231-f5a3-45d5-98a2-007319f65529:id=libvirt:key=AQCF6DRTOGGrEBAAnanwo1Qs5pONEGB2lCe49Q==:auth_supported=cephx\;none:mon_host=192.168.11.210\:6789,if=none,id=drive-ide0-0-1: error connecting
qemu-kvm: -drive file=rbd:volumes/volume-59d0b231-f5a3-45d5-98a2-007319f65529:id=libvirt:key=AQCF6DRTOGGrEBAAnanwo1Qs5pONEGB2lCe49Q==:auth_supported=cephx\;none:mon_host=192.168.11.210\:6789,if=none,id=drive-ide0-0-1: could not open disk image rbd:volumes/volume-59d0b231-f5a3-45d5-98a2-007319f65529:id=libvirt:key=AQCF6DRTOGGrEBAAnanwo1Qs5pONEGB2lCe49Q==:auth_supported=cephx\;none:mon_host=192.168.11.210\:6789: Operation not permitted
//ceph.log
2014-03-28 09:09:26.488788 7ffcd6eeb700 0 cephx server client.libvirt: couldn't find entity name: client.libvirt
### CEPH相关操作
#### About Pool
To show a pool’s utilization statistics, execute:
[root@eayun-admin ceph]# rados df
pool name category KB objects clones degraded unfound rd rd KB wr wr KB
data - 1 1 0 0 0 17 13 4 3
images - 0 0 0 0 0 0 0 0 0
metadata - 0 0 0 0 0 0 0 0 0
rbd - 0 0 0 0 0 0 0 0 0
volumes - 0 0 0 0 0 0 0 0 0
total used 235868 1
total avail 18570796
total space 18806664
[root@eayun-admin ceph]# ceph osd pool get images size
size: 3
[root@eayun-admin ceph]# ceph osd pool get images pg_num
pg_num: 1000
[root@eayun-admin ceph]# ceph osd pool get images pgp_num
pgp_num: 1000
[root@eayun-admin ceph]# ceph osd pool set images pgp_num 99
set pool 4 pgp_num to 99
[root@eayun-admin ceph]# ceph osd pool set images pg_num 99
specified pg_num 99 <= current 1000
注意：不能把一个Pool的pg_num缩小！！！
[root@eayun-admin ceph]# ceph osd pool get images pg_num
pg_num: 1000

删除一个Pool（yes, i really really mean it :^)）：
[root@eayun-admin ceph]# ceph osd pool delete images
Error EPERM: WARNING: this will *PERMANENTLY DESTROY* all data stored in pool images. If you are *ABSOLUTELY CERTAIN* that is what you want, pass the pool name *twice*, followed by --yes-i-really-really-mean-it.
[root@eayun-admin ceph]# ceph osd pool delete images images --yes-i-really-really-mean-it
root@glos-manager:/# vgdisplay cinder-volumes
--- Volume group ---
VG Name cinder-volumes
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 4
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 2
Open LV 2
Max PV 0
Cur PV 1
Act PV 1
VG Size 5.95 GiB
PE Size 4.00 MiB
Total PE 1522
Alloc PE / Size 1338 / 5.23 GiB
Free PE / Size 184 / 736.00 MiB
VG UUID EIKfKf-mIvG-Sabt-JlV4-19Ot-XqSK-3dEXE0
root@glos-manager:/# lvdisplay
--- Logical volume ---
LV Name /dev/cinder-volumes/nova-instances
VG Name cinder-volumes
LV UUID IQCf3L-Zebu-CfrD-H5ct-qlg6-DnTI-SaQWef
LV Write Access read/write
LV Status available
# open 1
LV Size 1.99 GiB
Current LE 509
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 256
Block device 252:0
--- Logical volume ---
LV Name /dev/cinder-volumes/system-root
VG Name cinder-volumes
LV UUID XVAGEN-Orq2-OnRI-FXvY-G3yd-dRiE-JdDlmw
LV Write Access read/write
LV Status available
# open 1
LV Size 3.73 GiB
Current LE 954
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 256
Block device 252:1
root@glos-manager:/# resize2fs -p /dev/mapper/cinder--volumes-nova--instances
resize2fs 1.42 (29-Nov-2011)
Filesystem at /dev/mapper/cinder--volumes-nova--instances is mounted on /var/lib/nova/instances; on-line resizing required
old_desc_blocks = 1, new_desc_blocks = 1
Performing an on-line resize of /dev/mapper/cinder--volumes-nova--instances to 521216 (4k) blocks.
The filesystem on /dev/mapper/cinder--volumes-nova--instances is now 521216 blocks long.