OpenStack High-Availability Cluster, Part 18: Integrating Ceph with OpenStack

dexter-wang 2020-02-05 23:08

Integrating Ceph with OpenStack
Official documentation: https://docs.ceph.com/docs/master/rbd/rbd-openstack/
 
A common use case for Ceph is providing cloud storage for OpenStack; the calls from OpenStack down to Ceph go through the three modules described below.
The three modules
When OpenStack is integrated with Ceph, three OpenStack components can use it:
Images
OpenStack's Glance component provides the image service; images can be stored directly in Ceph.
Operating system disks
OpenStack's Nova component provides the compute service; every instance needs an operating system and therefore a system (root) disk, which can be backed by Ceph.
Non-OS disks
OpenStack's Cinder component provides block storage, the equivalent of an ordinary disk in a physical machine, which can also be backed by Ceph.
From the OpenStack side these are three different components providing different services, but to Ceph each of them is simply an RBD image, i.e. a block device.
They all go through the same API, with only some differences in attributes and parameters.
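The rest of this article assumes that the Ceph pools and cephx users referenced below (images, volumes, vms; client.glance, client.cinder) already exist. A minimal sketch of that preparation, following the official guide linked above; the PG counts and capability profiles are illustrative assumptions:

# On a Ceph admin node: create the pools used by Glance, Cinder and Nova
ceph osd pool create images 64
ceph osd pool create volumes 64
ceph osd pool create vms 64
# Create the cephx users the services will authenticate as
ceph auth get-or-create client.glance mon 'profile rbd' osd 'profile rbd pool=images'
ceph auth get-or-create client.cinder mon 'profile rbd' \
    osd 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images'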
 
Adding the Ceph configuration to the three modules
1. Glance configuration
# Modify glance-api.conf on every node running the glance-api service (all 3 controller nodes); controller01 is used as the example.
# Only the sections involved in the Glance/Ceph integration are shown.
[root@node1 ~]# vim /etc/glance/glance-api.conf
# Enable copy-on-write cloning of images
[DEFAULT]
show_image_direct_url = True
# Switch the default local file store to Ceph RBD storage;
# note: the pool and user names below must match what was created on the Ceph cluster
[glance_store]
#stores = file,http
#default_store = file
#filesystem_store_datadir = /var/lib/glance/images/
stores = rbd
default_store = rbd
rbd_store_chunk_size = 8
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
 
The complete configuration:
[root@node1 ~]# cat /etc/glance/glance-api.conf
[DEFAULT]
show_image_direct_url = True
[cors]
[database]
connection = mysql+pymysql://glance:glance@10.30.1.208/glance
[glance_store]
#stores = file,http
#default_store = file
#filesystem_store_datadir = /var/lib/glance/images/
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8
[image_format]
[keystone_authtoken]
auth_uri = http://10.30.1.208:5000/v3
auth_url = http://10.30.1.208:35357/v3
memcached_servers = 10.30.1.208:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
 
# After changing the configuration, restart the services
[root@node1 ~]# systemctl restart openstack-glance-api.service
[root@node1 ~]# systemctl restart openstack-glance-registry.service
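For rbd_store_user = glance to work, the client.glance keyring also has to be present and readable by the glance user on every glance-api node. That step is not shown here; a sketch following the official guide (controller01 stands in for each controller node):

# From a Ceph admin node, push the glance key to the controller and fix ownership
ceph auth get-or-create client.glance | ssh controller01 tee /etc/ceph/ceph.client.glance.keyring
ssh controller01 chown glance:glance /etc/ceph/ceph.client.glance.keyring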
Upload an image
# After upload, the image is stored by default in the images pool of the Ceph cluster
[root@node1 ~]# openstack image create "centos7.5_x86_64"  --file /tmp/centos7.5.qcow2 --disk-format qcow2 --container-format bare --public
# Verify
[root@node1 ~]# openstack image list
+--------------------------------------+------------------+--------+
| ID                                   | Name             | Status |
+--------------------------------------+------------------+--------+
| 61fcdc23-5e82-4fac-9816-e7b93781188d | centos7.5_x86_64 | active |
+--------------------------------------+------------------+--------+
[root@node1 ~]# rbd ls images
61fcdc23-5e82-4fac-9816-e7b93781188d
 
 
Define the pool application type
# After the images pool comes into use, the Ceph cluster status changes to HEALTH_WARN
[root@controller01 ~]# ceph -s
# "ceph health detail" suggests how to fix it;
# the pools have no application type defined; it can be set to 'cephfs', 'rbd', 'rgw', etc.
[root@controller01 ~]# ceph health detail
# Fix the volumes and vms pools at the same time
[root@controller01 ~]# ceph osd pool application enable images rbd
[root@controller01 ~]# ceph osd pool application enable volumes rbd
[root@controller01 ~]# ceph osd pool application enable vms rbd
# Check
[root@controller01 ~]# ceph health detail
[root@controller01 ~]# ceph osd pool application get images
[root@controller01 ~]# ceph osd pool application get volumes
[root@controller01 ~]# ceph osd pool application get vms
 
Increase the replica count and minimum replica count to improve redundancy
[root@node1 ~]# ceph osd pool ls detail
pool 9 'images' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode warn last_change 823 flags hashpspool,selfmanaged_snaps stripe_width 0 application rbd
    removed_snaps [1~3]
    removed_snaps_queue [2~1]
[root@node1 ~]# ceph osd pool get images size
size: 2
[root@node1 ~]# ceph osd pool set images size 3
set pool 9 size to 3
[root@node1 ~]# ceph osd pool set images min_size 2
set pool 9 min_size to 2
[root@node1 ~]# ceph osd pool ls detail
pool 9 'images' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode warn last_change 831 flags hashpspool,selfmanaged_snaps stripe_width 0 application rbd
    removed_snaps [1~3]
    removed_snaps_queue [2~1]
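The volumes and vms pools used later were presumably created with the same size 2 default; if so, the same adjustment applies to them as well:

[root@node1 ~]# ceph osd pool set volumes size 3
[root@node1 ~]# ceph osd pool set volumes min_size 2
[root@node1 ~]# ceph osd pool set vms size 3
[root@node1 ~]# ceph osd pool set vms min_size 2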
 
 
2. Cinder configuration
# Cinder has a pluggable driver architecture and supports multiple storage backends at the same time;
# on each node running cinder-volume, configure the Ceph RBD driver in cinder.conf;
# there are 2 Cinder storage nodes; node5 is used as the example.
# Use Ceph as the backend store
[DEFAULT]
enabled_backends = ceph
# Add a new [ceph] section;
# note: the pool, user and uuid values below must match what was defined on the Ceph cluster and in libvirt
# Ceph RBD driver
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
# If multiple backends are configured, "glance_api_version" must be set in the [DEFAULT] section
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = 29355b97-1fd8-4135-a26e-d7efeaa27b0a
volume_backend_name = ceph
# After changing the configuration, restart the service
 
The complete configuration:
[root@node5 ~]# cat /etc/cinder/cinder.conf
[DEFAULT]
glance_api_servers = http://10.30.1.208:9292
transport_url = rabbit://openstack:openstack@10.30.1.208
auth_strategy = keystone
enabled_backends = ceph
glance_api_version = 2
[database]
connection = mysql+pymysql://cinder:cinder@10.30.1.208/cinder
[keystone_authtoken]
www_authenticate_uri =  http://10.30.1.208:5000
auth_url = http://10.30.1.208:35357
memcached_servers = 10.30.1.208:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_rabbit]
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = 29355b97-1fd8-4135-a26e-d7efeaa27b0a
volume_backend_name = ceph
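rbd_user and rbd_secret_uuid above assume two things the article does not show: the client.cinder keyring exists on the cinder-volume node, and a libvirt secret with that same UUID is registered on every compute node. A sketch of both steps based on the official guide (the UUID is the one already used in the configuration files):

# From a Ceph admin node: push the cinder key to the cinder-volume node (node5) and the compute nodes
ceph auth get-or-create client.cinder | ssh node5 tee /etc/ceph/ceph.client.cinder.keyring
ssh node5 chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
ceph auth get-key client.cinder | ssh node3 tee client.cinder.key

# On every compute node (node3 as the example): register the key with libvirt under the rbd_secret_uuid
cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>29355b97-1fd8-4135-a26e-d7efeaa27b0a</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF
virsh secret-define --file secret.xml
virsh secret-set-value --secret 29355b97-1fd8-4135-a26e-d7efeaa27b0a --base64 "$(cat client.cinder.key)"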
 
 
On the Cinder storage node:
[root@node5 ]# systemctl restart openstack-cinder-volume.service
 
Restart the Cinder services on the controller:
[root@node1 ~]# systemctl restart openstack-cinder-scheduler
[root@node1 ~]# systemctl restart openstack-cinder-api  
 
 
Note: volume_driver = cinder.volume.drivers.rbd.RBDDriver corresponds to /usr/lib/python2.7/site-packages/cinder/volume/drivers/rbd.py
Check the service status:
[root@node1 ~]# cinder service-list
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
| Binary           | Host            | Zone | Status  | State | Updated_at                 | Disabled Reason |
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
| cinder-scheduler | node1           | nova | enabled | up    | 2020-02-02T12:24:51.000000 | -               |
| cinder-volume    | node5@ceph      | nova | enabled | up    | 2020-02-02T12:24:54.000000 | -               |
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
Create a volume type on the controller
[root@node1 ~]#cinder type-create ceph
+--------------------------------------+------+-------------+-----------+
| ID                                   | Name | Description | Is_Public |
+--------------------------------------+------+-------------+-----------+
| 3714372c-d4a2-46c8-acb5-8ef4bfd28c8a | ceph | -           | True      |
+--------------------------------------+------+-------------+-----------+
 
On the controller node, link the Cinder volume type to volume_backend_name
[root@node1 ~]# cinder type-key ceph set volume_backend_name=ceph 
# Check the type settings
[root@node1 ~]# cinder extra-specs-list
+--------------------------------------+-----------+------------------------------------------+
| ID                                   | Name      | extra_specs                              |
+--------------------------------------+-----------+------------------------------------------+
| 3714372c-d4a2-46c8-acb5-8ef4bfd28c8a | ceph      | {'volume_backend_name': 'ceph'}          |
+--------------------------------------+-----------+------------------------------------------+
Restart the Cinder services on the controller
[root@node1 ~]# systemctl restart openstack-cinder-scheduler openstack-cinder-api  
 
 
 
Test
Create a volume on the Cinder controller node
[root@node1 ~]# cinder create --volume-type ceph 20 --name ceph-v1   # equivalent command: openstack volume create --type ceph --size 20 ceph-v1
+--------------------------------+--------------------------------------+
| Property                       | Value                                |
+--------------------------------+--------------------------------------+
| attachments                    | []                                   |
| availability_zone              | nova                                 |
| bootable                       | false                                |
| consistencygroup_id            | None                                 |
| created_at                     | 2020-02-05T03:17:58.000000           |
| description                    | None                                 |
| encrypted                      | False                                |
| id                             | f36bba2d-8a8b-46b9-ab0c-8e0d0d62ed04 |
| metadata                       | {}                                   |
| migration_status               | None                                 |
| multiattach                    | False                                |
| name                           | ceph-v1                              |
| os-vol-host-attr:host          | None                                 |
| os-vol-mig-status-attr:migstat | None                                 |
| os-vol-mig-status-attr:name_id | None                                 |
| os-vol-tenant-attr:tenant_id   | 75aed7016c86445198356e78dddde4ba     |
| replication_status             | None                                 |
| size                           | 20                                   |
| snapshot_id                    | None                                 |
| source_volid                   | None                                 |
| status                         | creating                             |
| updated_at                     | None                                 |
| user_id                        | 51ffe09d0ed342f4bf4e443e454055cc     |
| volume_type                    | ceph                                 |
+--------------------------------+--------------------------------------+
[root@node1 ~]# cinder list
+--------------------------------------+-----------+---------+------+-------------+----------+-------------+
| ID                                   | Status    | Name    | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+---------+------+-------------+----------+-------------+
| f36bba2d-8a8b-46b9-ab0c-8e0d0d62ed04 | available | ceph-v1 | 20   | ceph        | false    |             |
+--------------------------------------+-----------+---------+------+-------------+----------+-------------+
[root@node5 ~]# tail -f /var/log/cinder/volume.log
75aed7016c86445198356e78dddde4ba - d84340dbe16341f48681b8ea3e22e6da d84340dbe16341f48681b8ea3e22e6da] Volume f36bba2d-8a8b-46b9-ab0c-8e0d0d62ed04: being created as raw with specification: {'status': u'creating', 'volume_size': 20, 'volume_name': u'volume-f36bba2d-8a8b-46b9-ab0c-8e0d0d62ed04'}
2020-02-05 11:18:02.379 6119 INFO cinder.volume.flows.manager.create_volume [req-dc412fdd-507b-4f5d-ab2a-70a2cf44b79f 51ffe09d0ed342f4bf4e443e454055cc 75aed7016c86445198356e78dddde4ba - d84340dbe16341f48681b8ea3e22e6da d84340dbe16341f48681b8ea3e22e6da] Volume volume-f36bba2d-8a8b-46b9-ab0c-8e0d0d62ed04 (f36bba2d-8a8b-46b9-ab0c-8e0d0d62ed04): created successfully
2020-02-05 11:18:02.431 6119 INFO cinder.volume.manager [req-dc412fdd-507b-4f5d-ab2a-70a2cf44b79f 51ffe09d0ed342f4bf4e443e454055cc 75aed7016c86445198356e78dddde4ba - d84340dbe16341f48681b8ea3e22e6da d84340dbe16341f48681b8ea3e22e6da] Created volume successfully.
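The volume is stored in the volumes pool as an RBD image named volume-<volume id>, so it can also be checked directly on the Ceph side (the expected name comes from the log above):

[root@node1 ~]# rbd ls volumes   # should list volume-f36bba2d-8a8b-46b9-ab0c-8e0d0d62ed04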
 
 
Creating a volume from the web UI
 
 
 
3. Nova configuration
 
Configure ceph.conf
# To boot instances from Ceph RBD, Ceph must be configured as Nova's ephemeral backend;
# enabling the RBD cache on the compute nodes is recommended;
# for easier troubleshooting, configure the admin socket parameter: every instance using Ceph RBD then gets its own socket, which helps with performance analysis and debugging;
# the changes only touch the [client] and [client.cinder] sections of ceph.conf on all compute nodes; compute01 is used as the example
[root@node3 ~]# vim /etc/ceph/ceph.conf
[client]
rbd cache = true
rbd cache writethrough until flush = true
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
log file = /var/log/qemu/qemu-guest-$pid.log
rbd concurrent management ops = 20
[client.cinder]
keyring = /etc/ceph/ceph.client.cinder.keyring
# Create the socket and log directories referenced in ceph.conf and change their ownership
[root@node3 ~]# mkdir -p /var/run/ceph/guests/ /var/log/qemu/
[root@node3 ~]# chown qemu:libvirt /var/run/ceph/guests/ /var/log/qemu/
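Once an RBD-backed instance is running, the admin socket configured above can be queried for troubleshooting. A sketch; the actual socket file name is generated from the $pid/$cctid variables, so list the directory first:

[root@node3 ~]# ls /var/run/ceph/guests/
[root@node3 ~]# ceph --admin-daemon /var/run/ceph/guests/ceph-client.cinder.<pid>.<cctid>.asok config show | grep rbd_cache
[root@node3 ~]# ceph --admin-daemon /var/run/ceph/guests/ceph-client.cinder.<pid>.<cctid>.asok perf dump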
 
Note: in production the /var/run/ceph/guests directory kept disappearing after a server reboot, which made the compute node unusable (instances could neither be created nor deleted), so I added the following cron job to periodically check for and recreate /var/run/ceph/guests/:
echo '*/3 * * * * root if [ ! -d /var/run/ceph/guests/ ] ;then mkdir -pv /var/run/ceph/guests/ /var/log/qemu/ && chown qemu:libvirt /var/run/ceph/guests/ /var/log/qemu/ && systemctl restart libvirtd.service openstack-nova-compute.service ;fi' >>/etc/crontab
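On systemd-based compute nodes, a tmpfiles.d entry is an alternative sketch that recreates the runtime directory at every boot without a cron job (it assumes the qemu user and libvirt group exist, as in the chown above):

cat > /etc/tmpfiles.d/ceph-guests.conf <<EOF
d /var/run/ceph/guests 0755 qemu libvirt -
EOF
# create the directory now without waiting for a reboot
systemd-tmpfiles --create /etc/tmpfiles.d/ceph-guests.conf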
 
# On all compute nodes, configure Nova to use the vms pool of the Ceph cluster as its backend; node3 is used as the example
[root@node3 ~]# vim /etc/nova/nova.conf
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
# the uuid must match the rbd_secret_uuid used by Cinder and the libvirt secret
rbd_secret_uuid = 29355b97-1fd8-4135-a26e-d7efeaa27b0a
disk_cachemodes="network=writeback"
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
# Disable file injection
inject_password = false
inject_key = false
inject_partition = -2
# Enable discard for the instance's ephemeral root disk; with "unmap", space is released immediately when a scsi-type disk frees it
hw_disk_discard = unmap
# existing setting
virt_type=kvm
 
[root@node3 ~]# cat /etc/nova/nova.conf
[DEFAULT]
cpu_allocation_ratio=8
ram_allocation_ratio=2
disk_allocation_ratio=2
resume_guests_state_on_host_boot=true
reserved_host_disk_mb=20480
baremetal_enabled_filters=RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack@10.30.1.208
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
[barbican]
[cache]
[cells]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://10.30.1.208:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_uri = http://10.30.1.208:5000
auth_url = http://10.30.1.208:35357
memcached_servers = 10.30.1.208:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 29355b97-1fd8-4135-a26e-d7efeaa27b0a
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
inject_password = false
inject_key = false
inject_partition = -2
hw_disk_discard = unmap
virt_type=kvm
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url = http://10.30.1.208:9696
auth_url = http://10.30.1.208:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://10.30.1.208:35357/v3
username = placement
password = placement
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = 10.30.1.203
novncproxy_base_url = http://10.30.1.208:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
Restart the Nova services
Compute node:
[root@node3 ~]# systemctl restart libvirtd.service openstack-nova-compute.service
Controller node:
[root@node1 ~]# systemctl restart openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service
 
 
Note: after restarting the Nova services it is best to confirm they started correctly; if openstack-nova-compute fails to start, troubleshoot via /var/log/nova/nova-compute.log:
systemctl status libvirtd.service openstack-nova-compute.service
 
Configure live migration
Modify /etc/libvirt/libvirtd.conf
# Perform on all compute nodes; compute01 is used as the example;
# the line numbers of the changes in libvirtd.conf are listed below (from egrep -n)
[root@node3 ~]# egrep -vn "^$|^#" /etc/libvirt/libvirtd.conf
# Uncomment the following three lines:
22:listen_tls = 0
33:listen_tcp = 1
45:tcp_port = "16509"
# Uncomment and set the listen address:
55:listen_addr = "172.30.200.41"
# Uncomment and disable TCP authentication:
158:auth_tcp = "none"
 
Modify /etc/sysconfig/libvirtd
# Perform on all compute nodes; compute01 is used as the example;
# the line number of the change in the libvirtd file is shown below
[root@node3 ~]# egrep -vn "^$|^#" /etc/sysconfig/libvirtd
# Uncomment the following line:
9:LIBVIRTD_ARGS="--listen"
 
Set up iptables
# During live migration, the source compute node connects to TCP port 16509 on the destination compute node; this can be tested with "virsh -c qemu+tcp://{node_ip or node_name}/system";
# before and after migration, the instance being migrated uses TCP ports 49152~49161 on the source and destination compute nodes for temporary communication;
# because the instances already depend on iptables rules, do not casually restart the iptables service at this point; insert the new rules instead;
# also persist the rules by editing the configuration file directly (see the sketch below these commands); avoid the "iptables save" style of command;
# perform on all compute nodes; compute01 is used as the example
[root@node3 ~]# iptables -I INPUT -p tcp -m state --state NEW -m tcp --dport 16509 -j ACCEPT
[root@node3 ~]# iptables -I INPUT -p tcp -m state --state NEW -m tcp --dport 49152:49161 -j ACCEPT
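To survive a restart of the iptables service, the same rules can be written into /etc/sysconfig/iptables by editing the file directly, as noted above; roughly, the two lines below are added before the final REJECT rule (exact placement depends on the existing ruleset):

-A INPUT -p tcp -m state --state NEW -m tcp --dport 16509 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 49152:49161 -j ACCEPT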
 
Restart the services
# both libvirtd and nova-compute need to be restarted
[root@node3 ~]# systemctl restart libvirtd.service openstack-nova-compute.service
 
 
# Check the listener
[root@node3 ~]# netstat -tunlp | grep 16509
tcp        0      0 10.30.1.203:16509       0.0.0.0:*               LISTEN      9229/libvirtd  
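Connectivity can also be verified from a peer compute node with the virsh test mentioned earlier (node4 connecting to node3 here is just an example):

[root@node4 ~]# virsh -c qemu+tcp://node3/system list --all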
 
 
Verification
If a Ceph-backed volume is used as the boot disk, i.e. the instance's disk image lives on shared storage, live migration becomes straightforward.
1) Create a bootable volume backed by Ceph
# When Nova boots an instance from RBD, the image must be in raw format, otherwise both glance-api and cinder report errors when the instance starts;
# first convert the format, turning the *.img file into a *.raw file
[root@node1 ~]# qemu-img convert -f qcow2 cirros-0.3.5-x86_64-disk.img -O raw  cirros-0.3.5-x86_64-disk.raw
Note: if the original image is large, it is best to sparsify it during conversion, for example:
virt-sparsify -x centos7.5.qcow2 --convert raw centos7.5.raw
# Create a Glance image from the raw file
[root@node1 ~]# openstack image create "cirros3.5"  --file cirros-0.3.5-x86_64-disk.raw --disk-format raw --container-format bare --public
+------------------+----------------------------------------------------------------------------------------------------------+
| Field            | Value                                                                                                    |
+------------------+----------------------------------------------------------------------------------------------------------+
| checksum         | 4bda4108d1a74dd73a6ae6d0ba369916                                                                         |
| container_format | bare                                                                                                     |
| created_at       | 2020-02-05T07:09:47Z                                                                                     |
| disk_format      | raw                                                                                                      |
| file             | /v2/images/1333fc90-d9a6-4df8-9a8b-391b035770c0/file                                                     |
| id               | 1333fc90-d9a6-4df8-9a8b-391b035770c0                                                                     |
| min_disk         | 0                                                                                                        |
| min_ram          | 0                                                                                                        |
| name             | cirros3.5                                                                                                |
| owner            | 75aed7016c86445198356e78dddde4ba                                                                         |
| properties       | direct_url='rbd://272905d2-fd66-4ef6-a772-9cd73a274683/images/1333fc90-d9a6-4df8-9a8b-391b035770c0/snap' |
| protected        | False                                                                                                    |
| schema           | /v2/schemas/image                                                                                        |
| size             | 41126400                                                                                                 |
| status           | active                                                                                                   |
| tags             |                                                                                                          |
| updated_at       | 2020-02-05T07:10:09Z                                                                                     |
| virtual_size     | None                                                                                                     |
| visibility       | public                                                                                                   |
+------------------+----------------------------------------------------------------------------------------------------------+
 
[root@node5 ~]# rbd ls images
1333fc90-d9a6-4df8-9a8b-391b035770c0
61fcdc23-5e82-4fac-9816-e7b93781188d
# Create a bootable volume from the new image
[root@node1 ~]# cinder create --image-id 1333fc90-d9a6-4df8-9a8b-391b035770c0 --volume-type ceph --name ceph-bootable1 2
+--------------------------------+--------------------------------------+
| Property                       | Value                                |
+--------------------------------+--------------------------------------+
| attachments                    | []                                   |
| availability_zone              | nova                                 |
| bootable                       | false                                |
| consistencygroup_id            | None                                 |
| created_at                     | 2020-02-05T09:44:50.000000           |
| description                    | None                                 |
| encrypted                      | False                                |
| id                             | d5e8af24-7a9e-4509-82b8-6a2999aeeeb2 |
| metadata                       | {}                                   |
| migration_status               | None                                 |
| multiattach                    | False                                |
| name                           | ceph-bootable1                       |
| os-vol-host-attr:host          | None                                 |
| os-vol-mig-status-attr:migstat | None                                 |
| os-vol-mig-status-attr:name_id | None                                 |
| os-vol-tenant-attr:tenant_id   | 75aed7016c86445198356e78dddde4ba     |
| replication_status             | None                                 |
| size                           | 2                                    |
| snapshot_id                    | None                                 |
| source_volid                   | None                                 |
| status                         | creating                             |
| updated_at                     | None                                 |
| user_id                        | 51ffe09d0ed342f4bf4e443e454055cc     |
| volume_type                    | ceph                                 |
+--------------------------------+--------------------------------------+
 
# Check the newly created bootable volume
[root@node1 ~]# cinder list
+--------------------------------------+-----------+----------------+------+-------------+----------+-------------+
| ID                                   | Status    | Name           | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+----------------+------+-------------+----------+-------------+
| d5e8af24-7a9e-4509-82b8-6a2999aeeeb2 | available | ceph-bootable1 | 2    | ceph        | true     |             |
| f36bba2d-8a8b-46b9-ab0c-8e0d0d62ed04 | available | ceph-v1        | 20   | ceph        | false    |             |
+--------------------------------------+-----------+----------------+------+-------------+----------+-------------+
# Create an instance from the Ceph-backed volume;
# "--boot-volume" specifies a volume with the "bootable" attribute; once started, the instance runs from that volume
[root@node1 ~]# openstack flavor list
+--------------------------------------+------+------+------+-----------+-------+-----------+
| ID                                   | Name |  RAM | Disk | Ephemeral | VCPUs | Is Public |
+--------------------------------------+------+------+------+-----------+-------+-----------+
| bc3229e5-4614-48c3-943f-6a7783bcbbfc | 1c1g | 1024 |   20 |         0 |     1 | True      |
| f4063a6c-b185-4bad-92d7-938ddb209553 | 1c2g | 2024 |   40 |         0 |     1 | False     |
+--------------------------------------+------+------+------+-----------+-------+-----------+
[root@node1 ~]# openstack network list
+--------------------------------------+---------+--------------------------------------+
| ID                                   | Name    | Subnets                              |
+--------------------------------------+---------+--------------------------------------+
| 5ac5c948-909f-47ff-beba-a2ffaf917c5f | vlan99  | bbd536c6-a975-4841-8082-35b28de16ef0 |
| 98f5d807-80e0-48a3-9f40-97eb6ed15f33 | vlan809 | ffc3c430-e551-4c78-be5e-52e6aaf1484d |
+--------------------------------------+---------+--------------------------------------+
[root@node1 ~]# openstack security group list
+--------------------------------------+-----------+------------------------+----------------------------------+
| ID                                   | Name      | Description            | Project                          |
+--------------------------------------+-----------+------------------------+----------------------------------+
| 5bb5f2b1-9210-470f-a4a7-2715220b2920 | allow all |                        | 75aed7016c86445198356e78dddde4ba |
| dc241f97-0099-448f-8be4-8a41f1a6a806 | default   | Default security group | 15897818eb0a42a382b75bbeefb14983 |
+--------------------------------------+-----------+------------------------+----------------------------------+
[root@node1 ~]# nova boot --flavor bc3229e5-4614-48c3-943f-6a7783bcbbfc --boot-volume d5e8af24-7a9e-4509-82b8-6a2999aeeeb2 --nic net-id=5ac5c948-909f-47ff-beba-a2ffaf917c5f --security-group 5bb5f2b1-9210-470f-a4a7-2715220b2920 cirros-cephvolumes-instance1
or
[root@node1 ~]# openstack server create --flavor bc3229e5-4614-48c3-943f-6a7783bcbbfc --volume d5e8af24-7a9e-4509-82b8-6a2999aeeeb2 --network 5ac5c948-909f-47ff-beba-a2ffaf917c5f --security-group 5bb5f2b1-9210-470f-a4a7-2715220b2920 cirros-cephvolumes-instance1
+--------------------------------------+-------------------------------------------------+
| Property                             | Value                                           |
+--------------------------------------+-------------------------------------------------+
| OS-DCF:diskConfig                    | MANUAL                                          |
| OS-EXT-AZ:availability_zone          |                                                 |
| OS-EXT-SRV-ATTR:host                 | -                                               |
| OS-EXT-SRV-ATTR:hostname             | cirros-cephvolumes-instance1                    |
| OS-EXT-SRV-ATTR:hypervisor_hostname  | -                                               |
| OS-EXT-SRV-ATTR:instance_name        |                                                 |
| OS-EXT-SRV-ATTR:kernel_id            |                                                 |
| OS-EXT-SRV-ATTR:launch_index         | 0                                               |
| OS-EXT-SRV-ATTR:ramdisk_id           |                                                 |
| OS-EXT-SRV-ATTR:reservation_id       | r-3n00xb60                                      |
| OS-EXT-SRV-ATTR:root_device_name     | -                                               |
| OS-EXT-SRV-ATTR:user_data            | -                                               |
| OS-EXT-STS:power_state               | 0                                               |
| OS-EXT-STS:task_state                | scheduling                                      |
| OS-EXT-STS:vm_state                  | building                                        |
| OS-SRV-USG:launched_at               | -                                               |
| OS-SRV-USG:terminated_at             | -                                               |
| accessIPv4                           |                                                 |
| accessIPv6                           |                                                 |
| adminPass                            | GdtbyR3G3k62                                    |
| config_drive                         |                                                 |
| created                              | 2020-02-05T13:59:38Z                            |
| description                          | -                                               |
| flavor:disk                          | 20                                              |
| flavor:ephemeral                     | 0                                               |
| flavor:extra_specs                   | {}                                              |
| flavor:original_name                 | 1c1g                                            |
| flavor:ram                           | 1024                                            |
| flavor:swap                          | 0                                               |
| flavor:vcpus                         | 1                                               |
| hostId                               |                                                 |
| host_status                          |                                                 |
| id                                   | 8611811b-b4e9-4fdc-88b9-1d63427a664d            |
| image                                | Attempt to boot from volume - no image supplied |
| key_name                             | -                                               |
| locked                               | False                                           |
| metadata                             | {}                                              |
| name                                 | cirros-cephvolumes-instance1                    |
| os-extended-volumes:volumes_attached | []                                              |
| progress                             | 0                                               |
| security_groups                      | 5bb5f2b1-9210-470f-a4a7-2715220b2920            |
| status                               | BUILD                                           |
| tags                                 | []                                              |
| tenant_id                            | 75aed7016c86445198356e78dddde4ba                |
| updated                              | 2020-02-05T13:59:38Z                            |
| user_id                              | 51ffe09d0ed342f4bf4e443e454055cc                |
+--------------------------------------+-------------------------------------------------+
2) Boot an instance directly from Ceph RBD
# --nic: net-id is the network id, not the subnet-id;
# the final argument "cirros-cephrbd-instance1" is the instance name
 
nova boot --flavor bc3229e5-4614-48c3-943f-6a7783bcbbfc --image 1333fc90-d9a6-4df8-9a8b-391b035770c0 --nic net-id=5ac5c948-909f-47ff-beba-a2ffaf917c5f --security-group 5bb5f2b1-9210-470f-a4a7-2715220b2920 cirros-cephrbd-instance1
+--------------------------------------+--------------------------------------------------+
| Property                             | Value                                            |
+--------------------------------------+--------------------------------------------------+
| OS-DCF:diskConfig                    | MANUAL                                           |
| OS-EXT-AZ:availability_zone          |                                                  |
| OS-EXT-SRV-ATTR:host                 | -                                                |
| OS-EXT-SRV-ATTR:hostname             | cirros-cephrbd-instance1                         |
| OS-EXT-SRV-ATTR:hypervisor_hostname  | -                                                |
| OS-EXT-SRV-ATTR:instance_name        |                                                  |
| OS-EXT-SRV-ATTR:kernel_id            |                                                  |
| OS-EXT-SRV-ATTR:launch_index         | 0                                                |
| OS-EXT-SRV-ATTR:ramdisk_id           |                                                  |
| OS-EXT-SRV-ATTR:reservation_id       | r-d1kr04ao                                       |
| OS-EXT-SRV-ATTR:root_device_name     | -                                                |
| OS-EXT-SRV-ATTR:user_data            | -                                                |
| OS-EXT-STS:power_state               | 0                                                |
| OS-EXT-STS:task_state                | scheduling                                       |
| OS-EXT-STS:vm_state                  | building                                         |
| OS-SRV-USG:launched_at               | -                                                |
| OS-SRV-USG:terminated_at             | -                                                |
| accessIPv4                           |                                                  |
| accessIPv6                           |                                                  |
| adminPass                            | SsERBjVHB849                                     |
| config_drive                         |                                                  |
| created                              | 2020-02-05T14:02:52Z                             |
| description                          | -                                                |
| flavor:disk                          | 20                                               |
| flavor:ephemeral                     | 0                                                |
| flavor:extra_specs                   | {}                                               |
| flavor:original_name                 | 1c1g                                             |
| flavor:ram                           | 1024                                             |
| flavor:swap                          | 0                                                |
| flavor:vcpus                         | 1                                                |
| hostId                               |                                                  |
| host_status                          |                                                  |
| id                                   | 1bbc59bf-6827-439e-86d3-21eda28d8b43             |
| image                                | cirros3.5 (1333fc90-d9a6-4df8-9a8b-391b035770c0) |
| key_name                             | -                                                |
| locked                               | False                                            |
| metadata                             | {}                                               |
| name                                 | cirros-cephrbd-instance1                         |
| os-extended-volumes:volumes_attached | []                                               |
| progress                             | 0                                                |
| security_groups                      | 5bb5f2b1-9210-470f-a4a7-2715220b2920             |
| status                               | BUILD                                            |
| tags                                 | []                                               |
| tenant_id                            | 75aed7016c86445198356e78dddde4ba                 |
| updated                              | 2020-02-05T14:02:52Z                             |
| user_id                              | 51ffe09d0ed342f4bf4e443e454055cc                 |
+--------------------------------------+--------------------------------------------------+
# List the created instances
[root@node1 ~]# openstack server list
+--------------------------------------+------------------------------+--------+----------------------+-----------+--------+
| ID                                   | Name                         | Status | Networks             | Image     | Flavor |
+--------------------------------------+------------------------------+--------+----------------------+-----------+--------+
| 1bbc59bf-6827-439e-86d3-21eda28d8b43 | cirros-cephrbd-instance1     | ACTIVE | vlan99=172.16.99.102 | cirros3.5 | 1c1g   |
| 8611811b-b4e9-4fdc-88b9-1d63427a664d | cirros-cephvolumes-instance1 | ACTIVE | vlan99=172.16.99.101 |           | 1c1g   |
+--------------------------------------+------------------------------+--------+----------------------+-----------+--------+
 
# Show detailed information for the instance
[root@node1 ~]# nova show 1bbc59bf-6827-439e-86d3-21eda28d8b43
+--------------------------------------+----------------------------------------------------------+
| Property                             | Value                                                    |
+--------------------------------------+----------------------------------------------------------+
| OS-DCF:diskConfig                    | MANUAL                                                   |
| OS-EXT-AZ:availability_zone          | nova                                                     |
| OS-EXT-SRV-ATTR:host                 | node4                                                    |
| OS-EXT-SRV-ATTR:hostname             | cirros-cephrbd-instance1                                 |
| OS-EXT-SRV-ATTR:hypervisor_hostname  | node4                                                    |
| OS-EXT-SRV-ATTR:instance_name        | instance-0000010a                                        |
| OS-EXT-SRV-ATTR:kernel_id            |                                                          |
| OS-EXT-SRV-ATTR:launch_index         | 0                                                        |
| OS-EXT-SRV-ATTR:ramdisk_id           |                                                          |
| OS-EXT-SRV-ATTR:reservation_id       | r-d1kr04ao                                               |
| OS-EXT-SRV-ATTR:root_device_name     | /dev/vda                                                 |
| OS-EXT-SRV-ATTR:user_data            | -                                                        |
| OS-EXT-STS:power_state               | 1                                                        |
| OS-EXT-STS:task_state                | -                                                        |
| OS-EXT-STS:vm_state                  | active                                                   |
| OS-SRV-USG:launched_at               | 2020-02-05T14:03:15.000000                               |
| OS-SRV-USG:terminated_at             | -                                                        |
| accessIPv4                           |                                                          |
| accessIPv6                           |                                                          |
| config_drive                         |                                                          |
| created                              | 2020-02-05T14:02:52Z                                     |
| description                          | -                                                        |
| flavor:disk                          | 20                                                       |
| flavor:ephemeral                     | 0                                                        |
| flavor:extra_specs                   | {}                                                       |
| flavor:original_name                 | 1c1g                                                     |
| flavor:ram                           | 1024                                                     |
| flavor:swap                          | 0                                                        |
| flavor:vcpus                         | 1                                                        |
| hostId                               | 96c74a94ec1b18dbfd8a3bcda847feeb82a58271b0945688129cde93 |
| host_status                          | UP                                                       |
| id                                   | 1bbc59bf-6827-439e-86d3-21eda28d8b43                     |
| image                                | cirros3.5 (1333fc90-d9a6-4df8-9a8b-391b035770c0)         |
| key_name                             | -                                                        |
| locked                               | False                                                    |
| metadata                             | {}                                                       |
| name                                 | cirros-cephrbd-instance1                                 |
| os-extended-volumes:volumes_attached | []                                                       |
| progress                             | 0                                                        |
| security_groups                      | allow all                                                |
| status                               | ACTIVE                                                   |
| tags                                 | []                                                       |
| tenant_id                            | 75aed7016c86445198356e78dddde4ba                         |
| updated                              | 2020-02-05T14:03:15Z                                     |
| user_id                              | 51ffe09d0ed342f4bf4e443e454055cc                         |
| vlan99 network                       | 172.16.99.102                                            |
+--------------------------------------+----------------------------------------------------------+
 
# Inspect the libvirt XML of an instance backed by Ceph
[root@node4 ~]# virsh dumpxml 1bbc59bf-6827-439e-86d3-21eda28d8b43
<domain type='kvm' id='3'>
  <name>instance-0000010a</name>
  <uuid>1bbc59bf-6827-439e-86d3-21eda28d8b43</uuid>
  <metadata>
    <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.0">
      <nova:package version="17.0.13-1.el7"/>
      <nova:name>cirros-cephrbd-instance1</nova:name>
      <nova:creationTime>2020-02-05 14:03:11</nova:creationTime>
      <nova:flavor name="1c1g">
        <nova:memory>1024</nova:memory>
        <nova:disk>20</nova:disk>
        <nova:swap>0</nova:swap>
        <nova:ephemeral>0</nova:ephemeral>
        <nova:vcpus>1</nova:vcpus>
      </nova:flavor>
      <nova:owner>
        <nova:user uuid="51ffe09d0ed342f4bf4e443e454055cc">admin</nova:user>
        <nova:project uuid="75aed7016c86445198356e78dddde4ba">admin</nova:project>
      </nova:owner>
      <nova:root type="image" uuid="1333fc90-d9a6-4df8-9a8b-391b035770c0"/>
    </nova:instance>
  </metadata>
  <memory unit='KiB'>1048576</memory>
  <currentMemory unit='KiB'>1048576</currentMemory>
  <vcpu placement='static'>1</vcpu>
  <cputune>
    <shares>1024</shares>
  </cputune>
  <resource>
    <partition>/machine</partition>
  </resource>
  <sysinfo type='smbios'>
    <system>
      <entry name='manufacturer'>RDO</entry>
      <entry name='product'>OpenStack Compute</entry>
      <entry name='version'>17.0.13-1.el7</entry>
      <entry name='serial'>4a7258d2-f86c-af77-106d-598ffd558b8e</entry>
      <entry name='uuid'>1bbc59bf-6827-439e-86d3-21eda28d8b43</entry>
      <entry name='family'>Virtual Machine</entry>
    </system>
  </sysinfo>
  <os>
    <type arch='x86_64' machine='pc-i440fx-rhel7.6.0'>hvm</type>
    <boot dev='hd'/>
    <smbios mode='sysinfo'/>
  </os>
  <features>
    <acpi/>
    <apic/>
  </features>
  <cpu mode='custom' match='exact' check='full'>
    <model fallback='forbid'>IvyBridge-IBRS</model>
    <vendor>Intel</vendor>
    <topology sockets='1' cores='1' threads='1'/>
    <feature policy='require' name='ss'/>
    <feature policy='require' name='vmx'/>
    <feature policy='require' name='pcid'/>
    <feature policy='require' name='hypervisor'/>
    <feature policy='require' name='arat'/>
    <feature policy='require' name='tsc_adjust'/>
    <feature policy='require' name='umip'/>
    <feature policy='require' name='stibp'/>
    <feature policy='require' name='ssbd'/>
    <feature policy='require' name='xsaveopt'/>
    <feature policy='require' name='pdpe1gb'/>
  </cpu>
  <clock offset='utc'>
    <timer name='pit' tickpolicy='delay'/>
    <timer name='rtc' tickpolicy='catchup'/>
    <timer name='hpet' present='no'/>
  </clock>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <devices>
    <emulator>/usr/libexec/qemu-kvm</emulator>
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw' cache='none'/>
      <auth username='cinder'>
        <secret type='ceph' uuid='29355b97-1fd8-4135-a26e-d7efeaa27b0a'/>
      </auth>
      <source protocol='rbd' name='vms/1bbc59bf-6827-439e-86d3-21eda28d8b43_disk'>
        <host name='10.30.1.221' port='6789'/>
        <host name='10.30.1.222' port='6789'/>
        <host name='10.30.1.223' port='6789'/>
      </source>
      <target dev='vda' bus='virtio'/>
      <alias name='virtio-disk0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
    </disk>
    <controller type='usb' index='0' model='piix3-uhci'>
      <alias name='usb'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
    </controller>
    <controller type='pci' index='0' model='pci-root'>
      <alias name='pci.0'/>
    </controller>
    <interface type='bridge'>
      <mac address='fa:16:3e:03:7d:e3'/>
      <source bridge='brq5ac5c948-90'/>
      <target dev='tapa2466968-b7'/>
      <model type='virtio'/>
      <mtu size='1500'/>
      <alias name='net0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
    </interface>
    <serial type='pty'>
      <source path='/dev/pts/3'/>
      <log file='/var/lib/nova/instances/1bbc59bf-6827-439e-86d3-21eda28d8b43/console.log' append='off'/>
      <target type='isa-serial' port='0'>
        <model name='isa-serial'/>
      </target>
      <alias name='serial0'/>
    </serial>
    <console type='pty' tty='/dev/pts/3'>
      <source path='/dev/pts/3'/>
      <log file='/var/lib/nova/instances/1bbc59bf-6827-439e-86d3-21eda28d8b43/console.log' append='off'/>
      <target type='serial' port='0'/>
      <alias name='serial0'/>
    </console>
    <input type='tablet' bus='usb'>
      <alias name='input0'/>
      <address type='usb' bus='0' port='1'/>
    </input>
    <input type='mouse' bus='ps2'>
      <alias name='input1'/>
    </input>
    <input type='keyboard' bus='ps2'>
      <alias name='input2'/>
    </input>
    <graphics type='vnc' port='5901' autoport='yes' listen='0.0.0.0' keymap='en-us'>
      <listen type='address' address='0.0.0.0'/>
    </graphics>
    <video>
      <model type='cirrus' vram='16384' heads='1' primary='yes'/>
      <alias name='video0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
    </video>
    <memballoon model='virtio'>
      <stats period='10'/>
      <alias name='balloon0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
    </memballoon>
  </devices>
  <seclabel type='dynamic' model='dac' relabel='yes'>
    <label>+107:+107</label>
    <imagelabel>+107:+107</imagelabel>
  </seclabel>
</domain>
 
# Verify that the instance boots from Ceph RBD
[root@node1 ~]# rbd ls vms
3) Live-migrate the RBD-booted instance
# "nova show 1bbc59bf-6827-439e-86d3-21eda28d8b43" shows that the RBD-booted instance sits on node4 before the migration;
# or verify with "nova hypervisor-servers node4";
[root@node1 ~]# nova live-migration cirros-cephrbd-instance1 node3
Note: before migrating, make sure node3 and node4 can reach each other over passwordless SSH (passwordless SSH between compute nodes is the key to a successful instance migration). A demonstration follows.
Taking ceph-host-04 and ceph-host-02 as the example, the process is simply to generate a key pair on one host (ceph-host-04) with ssh-keygen, then copy /root/.ssh/id_rsa and /root/.ssh/id_rsa.pub to the other hosts (including ceph-host-04 and ceph-host-02 themselves), so that all N hosts can access each other without a password.
 
[root@ceph-host-04 ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:MGkIRd0B3Juv6+7OlNOknVGWGKXOulP4b/ddw+e+RDg root@ceph-host-04
The key's randomart image is:
+---[RSA 2048]----+
|  .ooo.+.....    |
|   . .o.o  + .   |
|    . =  oo +    |
|     . ooo o  .  |
|        So=  E . |
|        .Boo  +  |
|        *++    +o|
|       ooo. . o.=|
|       =Oo o.. +*|
+----[SHA256]-----+
[root@ceph-host-04 ~]# ssh-copy-id ceph-host-04
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'ceph-host-04 (10.30.1.224)' can't be established.
ECDSA key fingerprint is SHA256:qjCvy9Q/qRV2HIT0bt6ev//3rOGVntxAPQRDZ4aXfEE.
ECDSA key fingerprint is MD5:99:db:b6:3d:83:0e:c2:56:25:47:f6:1b:d7:bd:f0:ce.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@ceph-host-04's password:
Number of key(s) added: 1
Now try logging into the machine, with:   "ssh 'ceph-host-04'"
and check to make sure that only the key(s) you wanted were added.
 
[root@ceph-host-04 ~]# ssh-copy-id ceph-host-02
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@ceph-host-02's password:
Number of key(s) added: 1
Now try logging into the machine, with:   "ssh 'ceph-host-02'"
and check to make sure that only the key(s) you wanted were added.
 
[root@ceph-host-04 ~]# scp .ssh/id_rsa
id_rsa      id_rsa.pub  
[root@ceph-host-04 ~]# scp .ssh/id_rsa root@ceph-host-02:/root/.ssh/
id_rsa 
[root@ceph-host-04 ~]# ssh  ceph-host-02 w
01:23:10 up  5:20,  1 user,  load average: 0.12, 0.18, 0.36
USER     TTY      FROM             LOGIN@   IDLE   JCPU   PCPU WHAT
root     pts/0    desktop-l37krfr. 23:27    1:58   0.14s  0.14s -bash
[root@ceph-host-02 ~]# ssh  ceph-host-04 w
01:25:01 up  5:22,  1 user,  load average: 0.00, 0.01, 0.05
USER     TTY      FROM             LOGIN@   IDLE   JCPU   PCPU WHAT
root     pts/0    desktop-l37krfr. 22:04    5.00s  0.26s  0.26s -bash
# The status can be watched during migration (both nova list and openstack server list --long --fit-width show it)
[root@node1 ~]# nova list
+--------------------------------------+------------------------------+-----------+------------+-------------+----------------------+
| ID                                   | Name                         | Status    | Task State | Power State | Networks             |
+--------------------------------------+------------------------------+-----------+------------+-------------+----------------------+
| 1bbc59bf-6827-439e-86d3-21eda28d8b43 | cirros-cephrbd-instance1     | MIGRATING | migrating  | Running     | vlan99=172.16.99.102 |
| 8611811b-b4e9-4fdc-88b9-1d63427a664d | cirros-cephvolumes-instance1 | ACTIVE    | -          | Running     | vlan99=172.16.99.101 |
+--------------------------------------+------------------------------+-----------+------------+-------------+----------------------+
 
# After the migration completes, check which node the instance is on;
# or use "nova show 1bbc59bf-6827-439e-86d3-21eda28d8b43" and look at "hypervisor_hostname";
# "openstack server list --long --fit-width" shows the same information
[root@node1 ~]# nova hypervisor-servers node4
[root@node1 ~]# nova hypervisor-servers node3
 
 
 
 
 
Extension: create instances in batch and check how they look on Ceph
Convert the existing qcow2 centos7.5 image to raw format and sparsify it (note: images converted from qcow2 to raw often refused to run on this OpenStack platform; building a raw image directly is more reliable)
[root@node1 tmp]# virt-sparsify -x centos7.5.qcow2 --convert raw centos7.5.raw
[root@node1 tmp]# ls -lh
total 2.4G
-rw-r----- 1 qemu  qemu 1.3G Feb  4 21:24 centos7.5.qcow2
-rw-r--r-- 1 root  root  20G Feb  6 13:34 centos7.5.raw
[root@node1 tmp]# qemu-img info centos7.5.raw
image: centos7.5.raw
file format: raw
virtual size: 20G (21474836480 bytes)
disk size: 1.1G
 
Upload it to Glance
[root@node1 tmp]# openstack image create "centos7.5" --file centos7.5.raw --disk-format raw --container-format bare --public
+------------------+----------------------------------------------------------------------------------------------------------+
| Field            | Value                                                                                                    |
+------------------+----------------------------------------------------------------------------------------------------------+
| checksum         | 8862d942f2237a9478023fe48232d420                                                                         |
| container_format | bare                                                                                                     |
| created_at       | 2020-02-06T05:52:40Z                                                                                     |
| disk_format      | raw                                                                                                      |
| file             | /v2/images/bf4bff9e-51de-4787-a76e-3a4637e7fe75/file                                                     |
| id               | bf4bff9e-51de-4787-a76e-3a4637e7fe75                                                                     |
| min_disk         | 0                                                                                                        |
| min_ram          | 0                                                                                                        |
| name             | centos7.5                                                                                                |
| owner            | 75aed7016c86445198356e78dddde4ba                                                                         |
| properties       | direct_url='rbd://272905d2-fd66-4ef6-a772-9cd73a274683/images/bf4bff9e-51de-4787-a76e-3a4637e7fe75/snap' |
| protected        | False                                                                                                    |
| schema           | /v2/schemas/image                                                                                        |
| size             | 21474836480                                                                                              |
| status           | active                                                                                                   |
| tags             |                                                                                                          |
| updated_at       | 2020-02-06T06:39:06Z                                                                                     |
| virtual_size     | None                                                                                                     |
| visibility       | public                                                                                                   |
+------------------+----------------------------------------------------------------------------------------------------------+
Check the image in Ceph
[root@node1 ~]# rbd ls images
bf4bff9e-51de-4787-a76e-3a4637e7fe75
[root@node1 ~]# rbd info --pool images bf4bff9e-51de-4787-a76e-3a4637e7fe75
rbd image 'bf4bff9e-51de-4787-a76e-3a4637e7fe75':
    size 20GiB in 2560 objects
    order 23 (8MiB objects)
    block_name_prefix: rbd_data.d128d2b1e13e5
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
    flags:
    create_timestamp: Thu Feb  6 21:52:08 2020
 
Create the instances with a script
[root@node1 ~]# cat nova-create.sh
for i in `seq 1 9`;do
    IP1=172.16.99.14${i}
    IP2=192.168.9.22${i}
    openstack server create --flavor 1c1g --availability-zone nova --image 'centos7.5' --nic net-id=5ac5c948-909f-47ff-beba-a2ffaf917c5f,v4-fixed-ip=${IP1} --nic net-id=98f5d807-80e0-48a3-9f40-97eb6ed15f33,v4-fixed-ip=${IP2} --security-group 5bb5f2b1-9210-470f-a4a7-2715220b2920 openstack-vm${i}
done
[root@node1 ~]# bash nova-create.sh
[root@node1 ~]# openstack server list
+--------------------------------------+---------------+--------+---------------------------------------------+-----------+--------+
| ID                                   | Name          | Status | Networks                                    | Image     | Flavor |
+--------------------------------------+---------------+--------+---------------------------------------------+-----------+--------+
| 9bca8f74-9a44-4697-b0e0-3631fc063e99 | openstack-vm9 | ACTIVE | vlan99=172.16.99.149; vlan809=192.168.9.229 | centos7.5 | 1c1g   |
| 9e3a5d42-8347-48d4-a628-7c8caaf20ccd | openstack-vm8 | ACTIVE | vlan99=172.16.99.148; vlan809=192.168.9.228 | centos7.5 | 1c1g   |
| 8b512e2b-c5d3-4ce9-9fec-6868488cfbfb | openstack-vm7 | ACTIVE | vlan99=172.16.99.147; vlan809=192.168.9.227 | centos7.5 | 1c1g   |
| b5a5b16c-f565-433f-846b-cd94b9018995 | openstack-vm6 | ACTIVE | vlan99=172.16.99.146; vlan809=192.168.9.226 | centos7.5 | 1c1g   |
| 271584ec-3dea-4351-8cf3-97fcd416e2c0 | openstack-vm5 | ACTIVE | vlan99=172.16.99.145; vlan809=192.168.9.225 | centos7.5 | 1c1g   |
| fc6ae1f4-dc19-4d5b-b044-4564b40a72e3 | openstack-vm4 | ACTIVE | vlan99=172.16.99.144; vlan809=192.168.9.224 | centos7.5 | 1c1g   |
| eabc346b-1354-4d30-913a-3983948e29d8 | openstack-vm3 | ACTIVE | vlan99=172.16.99.143; vlan809=192.168.9.223 | centos7.5 | 1c1g   |
| ec84be63-687f-4dcf-9ce2-b87a923640ab | openstack-vm2 | ACTIVE | vlan99=172.16.99.142; vlan809=192.168.9.222 | centos7.5 | 1c1g   |
| 5c459d9c-fb56-422b-a074-a142ba2d091d | openstack-vm1 | ACTIVE | vlan99=172.16.99.141; vlan809=192.168.9.221 | centos7.5 | 1c1g   |
+--------------------------------------+---------------+--------+---------------------------------------------+-----------+--------+
Check these instances in the Ceph storage pools
[root@node1 ~]# rbd ls vms
271584ec-3dea-4351-8cf3-97fcd416e2c0_disk
5c459d9c-fb56-422b-a074-a142ba2d091d_disk
8b512e2b-c5d3-4ce9-9fec-6868488cfbfb_disk
9bca8f74-9a44-4697-b0e0-3631fc063e99_disk
9e3a5d42-8347-48d4-a628-7c8caaf20ccd_disk
b5a5b16c-f565-433f-846b-cd94b9018995_disk
eabc346b-1354-4d30-913a-3983948e29d8_disk
ec84be63-687f-4dcf-9ce2-b87a923640ab_disk
fc6ae1f4-dc19-4d5b-b044-4564b40a72e3_disk
[root@node1 ~]# rbd info --pool vms 5c459d9c-fb56-422b-a074-a142ba2d091d_disk
rbd image '5c459d9c-fb56-422b-a074-a142ba2d091d_disk':
    size 20GiB in 2560 objects
    order 23 (8MiB objects)
    block_name_prefix: rbd_data.d1ffc5b6d642c
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
    flags:
    create_timestamp: Thu Feb  6 23:10:32 2020
    parent: images/bf4bff9e-51de-4787-a76e-3a4637e7fe75@snap
    overlap: 20GiB
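The parent field above shows that each instance disk is a copy-on-write clone of the Glance image snapshot; the clones of that snapshot can be listed directly:

[root@node1 ~]# rbd children images/bf4bff9e-51de-4787-a76e-3a4637e7fe75@snap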
[root@node1 ~]# ceph df
RAW STORAGE:
    CLASS     SIZE        AVAIL       USED       RAW USED     %RAW USED
    hdd       1.5 TiB     1.4 TiB     47 GiB       66 GiB          4.43
    TOTAL     1.5 TiB     1.4 TiB     47 GiB       66 GiB          4.43
POOLS:
    POOL              ID     STORED      OBJECTS     USED        %USED     MAX AVAIL
    nova-metadata      6      13 MiB          25      38 MiB         0       461 GiB
    nova-data          7       450 B           6     1.1 MiB         0       443 GiB
    volumes            8     1.8 KiB           8     512 KiB         0       818 GiB
    images             9      23 GiB       2.75k      43 GiB      3.10       703 GiB
    vms               11     1.7 GiB         233     3.2 GiB      0.24       710 GiB
 
 
About errors:
 
Fixing a HEALTH_WARN issue
health: HEALTH_WARN
clock skew detected on mon.ceph-host-02, mon.ceph-host-03
This is caused by clock skew between the monitor nodes; synchronize time on all nodes:
# ansible ceph -a 'yum install ntpdate -y'
# ansible ceph -a 'systemctl stop ntpdate'
# ansible ceph -a 'ntpdate time.windows.com'
# ansible ceph -m shell -a 'echo "0 2 * * * root timedatectl set-timezone Asia/Shanghai && ntpdate time1.aliyun.com && hwclock -w >/dev/null 2>&1" >>/etc/crontab'
 
Author: Dexter_Wang   Position: Senior Linux Architect at an Internet company   Contact: 993852246@qq.com
