首页 > 技术文章 > Openstack Mitaka 版本 centos7.9安装-0

gshelldon 2021-11-09 10:22 原文

Openstack Mitaka 版本 centos7.9安装

role IP 配置
控制节点 controller 10.0.0.11 3G+ 开启虚拟化
计算节点 compute1 10.0.0.31 1G+ 开启虚拟化

1、基础配置

# Base utilities used throughout the rest of the install.
yum -y install lrzsz vim ntpdate wget net-tools

# Make `vi` an alias for vim in every login shell.
cat > /etc/profile.d/vim.sh <<'EOF'
alias vi=vim
EOF

# 更改国内镜像源 (switch to the Aliyun mirrors, then rebuild the metadata cache)
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum makecache fast

1.1 关闭selinux

setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

1.2 关闭防火墙

systemctl stop firewalld
systemctl disable firewalld

1.3 配置hosts文件

# Name resolution for both nodes; the compute node must resolve
# "controller" and the controller must resolve "compute1".
{
  echo '10.0.0.11   controller'
  echo '10.0.0.31   compute1'
} >> /etc/hosts

# 更改主机名 (run on the controller; use compute1 on the compute node)
hostnamectl set-hostname controller

1.4 配置mitaka本地源

本地源地址:

链接:https://pan.baidu.com/s/1ed7CuhMOzBOwOng-8dtwpA
提取码:6fhk

# 在控制节点安装 — serve the local package repos over FTP.
yum -y install vsftpd
systemctl enable --now vsftpd

# -p keeps the directory creation idempotent if this section is re-run.
cd /var/ftp/pub || exit 1
mkdir -p openstackmitaka mnt

# 上传的本地镜像
[root@controller pub]# ls
mnt  openstackmitaka  Openstack-Mitaka.iso

# Mount the uploaded OpenStack package ISO (absolute paths so this does not
# depend on the current working directory).
mount -o loop /var/ftp/pub/Openstack-Mitaka.iso /var/ftp/pub/openstackmitaka/

# 操作系统镜像 — mount the CentOS installation media.
mount /dev/sr0 /var/ftp/pub/mnt

# 控制节点
## 配置开机自动挂载
## NOTE: a file-backed ISO must carry the "loop" option in fstab, otherwise
## the mount fails at boot.
cat >> /etc/fstab <<'EOF'
/var/ftp/pub/Openstack-Mitaka.iso         /var/ftp/pub/openstackmitaka/       iso9660 loop,ro,relatime 0 0
/dev/sr0                                  /var/ftp/pub/mnt                    iso9660 defaults        0 0
EOF


# 配置本地源
# 配置本地源 — point yum at the FTP-exported ISO contents on the controller.
cat > /etc/yum.repos.d/openstack.repo <<'EOF'
[Base_ISO]
name=base_iso
baseurl=ftp://10.0.0.11/pub/mnt/
gpgcheck=0

[Openstack_Mitaka]
name=openstack mitaka
baseurl=ftp://10.0.0.11/pub/openstackmitaka/Openstack-Mitaka/
gpgcheck=0
EOF

yum makecache fast

1.5 配置时间同步

yum -y install chrony

# 服务端-控制节点
vi /etc/chrony.conf
server ntp1.aliyun.com iburst
allow 10.0.0.0/24

systemctl restart chronyd
# 客户端-计算节点
 vi /etc/chrony.conf 
 
 server 10.0.0.11 iburst

1.6 安装基本组件

# 所有节点
yum -y install python-openstackclient openstack-selinux openstack-utils.noarch

1.7 控制节点执行

a、安装数据库

yum -y install mariadb-server mariadb python2-PyMySQL

vi /etc/my.cnf
[mysqld]
bind-address = 10.0.0.11
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

systemctl enable mariadb --now

# 初始化数据库
mysql_secure_installation

## 设置数据库密码
Set root password? [Y/n] y
New password:  # <===== openstack
Re-enter new password: 
Password updated successfully!

Remove anonymous users? [Y/n] y
Disallow root login remotely? [Y/n] y
Remove test database and access to it? [Y/n] y
Reload privilege tables now? [Y/n] y

b、安装rabbitmq

 yum install rabbitmq-server -y
 
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service

# 添加用户和密码‘RABBIT_PASS’并授权
rabbitmqctl add_user openstack RABBIT_PASS
rabbitmqctl set_permissions openstack ".*" ".*" ".*"

# 启用rabbit dashboard
rabbitmq-plugins enable  rabbitmq_management

c、安装memcache

认证服务使用Memcached缓存认证令牌。缓存服务memcached运行在控制节点。在生产部署中,我们推荐联合启用防火墙、认证和加密保证它的安全。

yum install memcached python-memcached -y

# memcached takes its listen addresses via the -l flag; the bare value
# "controller,10.0.0.11" is not a valid option string and keeps the service
# from listening as intended. Listen on loopback plus the management address
# so the controller services can reach it.
vi /etc/sysconfig/memcached
OPTIONS="-l 127.0.0.1,::1,controller"

systemctl enable memcached.service
systemctl start memcached.service

2.安装认证服务

在控制节点执行

2.1 创建数据库

# 创建数据库
mysql -uroot -p
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
  IDENTIFIED BY 'KEYSTONE_DBPASS';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
  IDENTIFIED BY 'KEYSTONE_DBPASS';

2.2 安装身份认证服务

yum install openstack-keystone httpd mod_wsgi -y

# 临时认证token
openssl rand -hex 10
b7ab536ae8ba4c3db833

# Back up the stock config and strip blank/comment lines.
cp /etc/keystone/keystone.conf{,_bak}
grep -Ev '^$|^#' /etc/keystone/keystone.conf_bak > /etc/keystone/keystone.conf

# Write the minimal keystone config (this overwrites the grep result above):
#  - admin_token: the temporary bootstrap token generated with openssl above
#  - [database] connection: keystone's MariaDB DSN
#  - [token] provider = fernet: use fernet tokens (keys initialised in 2.4)
cat>/etc/keystone/keystone.conf<<EOF
[DEFAULT]
admin_token = b7ab536ae8ba4c3db833
[assignment]
[auth]
[cache]
[catalog]
[cors]
[cors.subdomain]
[credential]
[database]
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[eventlet_server_ssl]
[federation]
[fernet_tokens]
[identity]
[identity_mapping]
[kvs]
[ldap]
[matchmaker_redis]
[memcache]
[oauth1]
[os_inherit]
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
[policy]
[resource]
[revoke]
[role]
[saml]
[shadow_users]
[signing]
[ssl]
[token]
provider = fernet

[tokenless_auth]
[trust]
EOF
# Config changes can instead be applied repeatably with the openstack-config
# utility.
openstack-config

2.3 同步数据库

# 同步数据库
su -s /bin/sh -c "keystone-manage db_sync" keystone

# 初始化成功用此命令可以查看生成的数据表。
mysql keystone -e 'show tables;' -uroot -p

2.4 初始化fernet

keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone

# 会在/etc/keystone/下生成 fernet-keys目录,里面存放了fernet 的token;如果身份认证服务在注册的时候出错可以重新初始化新的fernet token,把原来的删除。
/etc/keystone/fernet-keys

2.5 配置httpd服务

vi /etc/httpd/conf/httpd.conf
ServerName controller

vi  /etc/httpd/conf.d/wsgi-keystone.conf
Listen 5000
Listen 35357

<VirtualHost *:5000>
    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-public
    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    ErrorLogFormat "%{cu}t %M"
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined

    <Directory /usr/bin>
        Require all granted
    </Directory>
</VirtualHost>

<VirtualHost *:35357>
    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-admin
    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    ErrorLogFormat "%{cu}t %M"
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined

    <Directory /usr/bin>
        Require all granted
    </Directory>
</VirtualHost>

# 启动服务
systemctl enable httpd --now

2.6 注册身份认证服务

export OS_TOKEN=b7ab536ae8ba4c3db833
export OS_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3

# 创建身份认证服务
[root@controller ~]# openstack service create --name keystone --description "OpenStack Identity" identity
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Identity               |
| enabled     | True                             |
| id          | db37d54d45274871bea7ba510cc6584d |
| name        | keystone                         |
| type        | identity                         |
+-------------+----------------------------------+

# 身份认证的接口
[root@controller ~]# openstack endpoint create --region RegionOne identity public http://controller:5000/v3
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 9914f144a5fd44cb814997a8d785016d |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | db37d54d45274871bea7ba510cc6584d |
| service_name | keystone                         |
| service_type | identity                         |
| url          | http://controller:5000/v3        |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne identity internal http://controller:5000/v3
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | affd11b84ecd401189d7ac3ea5262786 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | db37d54d45274871bea7ba510cc6584d |
| service_name | keystone                         |
| service_type | identity                         |
| url          | http://controller:5000/v3        |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne identity admin http://controller:35357/v3
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 5e739dc035834ffebc1567aa69511195 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | db37d54d45274871bea7ba510cc6584d |
| service_name | keystone                         |
| service_type | identity                         |
| url          | http://controller:35357/v3       |
+--------------+----------------------------------+

2.7 创建域、项目、用户和角色

[root@controller ~]# openstack domain create --description "Default Domain" default
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Default Domain                   |
| enabled     | True                             |
| id          | 1510ca4a5403403b9bb65719bfdf67fe |
| name        | default                          |
+-------------+----------------------------------+
[root@controller ~]# openstack project create --domain default --description "Admin Project" admin
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Admin Project                    |
| domain_id   | 1510ca4a5403403b9bb65719bfdf67fe |
| enabled     | True                             |
| id          | 3a75cae60c7c49b0b3e843e5949975bc |
| is_domain   | False                            |
| name        | admin                            |
| parent_id   | 1510ca4a5403403b9bb65719bfdf67fe |
+-------------+----------------------------------+
[root@controller ~]# openstack user create --domain default --password-prompt admin
User Password:    # <===== admin,用户的密码,可以用来登陆dashboard
Repeat User Password:
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | 1510ca4a5403403b9bb65719bfdf67fe |
| enabled   | True                             |
| id        | c4d4838b317840c1b54ba1549a1010be |
| name      | admin                            |
+-----------+----------------------------------+
[root@controller ~]# openstack role create admin
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | None                             |
| id        | b1681570a20543c8857b4fa7b5f3ee63 |
| name      | admin                            |
+-----------+----------------------------------+
[root@controller ~]# openstack role add --project admin --user admin admin

# 创建一个service项目后面的组件会放在这个项目中
[root@controller ~]# openstack project create --domain default --description "Service Project" service
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Service Project                  |
| domain_id   | 1510ca4a5403403b9bb65719bfdf67fe |
| enabled     | True                             |
| id          | f9dee317061e47c7a3d56b7b6f8b2250 |
| is_domain   | False                            |
| name        | service                          |
| parent_id   | 1510ca4a5403403b9bb65719bfdf67fe |
+-------------+----------------------------------+

# Credential file: after sourcing it the openstack client authenticates
# against keystone and obtains tokens automatically.
vi admin-openrc
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
# Password of the "admin" user created in 2.7.
export OS_PASSWORD=admin
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

source admin-openrc

# 需要重新连接下终端 或者unset一下前面三个临时的变量
[root@controller ~]# openstack token issue
+------------+----------------------------------------------------------------------------------------------------------------+
| Field      | Value                                                                                                          |
+------------+----------------------------------------------------------------------------------------------------------------+
| expires    | 2021-11-05T05:52:01.962585Z                                                                                    |
| id         | gAAAAABhhLhx874niGA3YGqO2tZ4jiECJxmLrdl4Uqqwdp0oKX-oCCjTG7Ts5ff8bApGeIdAEil59MuwU2NRlHo6LP0LVvMSGYgFyPkKK5y2_i |
|            | UZD0gjc3omArcgkJgjzgJsv0yVKOJOAa6DhV4T4qXvbKwKQI9J5fzICgrzXnyS2kqQwcmU-xY                                      |
| project_id | 3a75cae60c7c49b0b3e843e5949975bc                                                                               |
| user_id    | c4d4838b317840c1b54ba1549a1010be                                                                               |
+------------+----------------------------------------------------------------------------------------------------------------+

[root@controller ~]# openstack endpoint list

3.glance 镜像服务

在控制节点

3.1 创建数据库

mysql -uroot -p

CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';

3.2 创建用户

[root@controller ~]# openstack user create --domain default --password-prompt glance
User Password:    # <==== glance
Repeat User Password:
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | 1510ca4a5403403b9bb65719bfdf67fe |
| enabled   | True                             |
| id        | bd2f2e471ab74de1abdee9fe171fb892 |
| name      | glance                           |
+-----------+----------------------------------+

[root@controller ~]# openstack role add --project service --user glance admin

3.3 注册服务目录信息

[root@controller ~]# openstack service create --name glance --description "OpenStack Image" image
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Image                  |
| enabled     | True                             |
| id          | f7636aa78fc946e5a9f0464b0c2a5209 |
| name        | glance                           |
| type        | image                            |
+-------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne image public http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 8acfd45ca57b4dcabdfa71e1cc1bda7c |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | f7636aa78fc946e5a9f0464b0c2a5209 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne image internal http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 3c5c594af71e4e27a932a9244c332770 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | f7636aa78fc946e5a9f0464b0c2a5209 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne image admin http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 2879c39c7dc64db8a5f82d479b6d2f08 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | f7636aa78fc946e5a9f0464b0c2a5209 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+

3.4 安装并配置

yum install openstack-glance -y
# Back up the stock config and strip blank/comment lines.
cp /etc/glance/glance-api.conf{,_bak}
grep -Ev '^$|^#' /etc/glance/glance-api.conf_bak > /etc/glance/glance-api.conf
# Minimal glance-api config:
#  - [database] connection: glance's MariaDB DSN
#  - [glance_store]: keep images as plain files under /var/lib/glance/images/
#  - [keystone_authtoken]: service credentials for the glance user (3.2)
#  - [paste_deploy] flavor = keystone: authenticate API requests via keystone
[root@controller ~]# cat>/etc/glance/glance-api.conf<<EOF
[DEFAULT]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/

[image_format]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance

[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
EOF
# Back up the stock config and strip blank/comment lines.
cp /etc/glance/glance-registry.conf{,_bak}
grep -Ev '^$|^#' /etc/glance/glance-registry.conf_bak > /etc/glance/glance-registry.conf

# glance-registry reuses the same database DSN and keystone service
# credentials as glance-api.
[root@controller ~]# cat>/etc/glance/glance-registry.conf<<EOF
[DEFAULT]
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance

[glance_store]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance

[matchmaker_redis]
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_policy]
[paste_deploy]
flavor = keystone

[profiler]
EOF
[root@controller ~]# su -s /bin/sh -c "glance-manage db_sync" glance
Option "verbose" from group "DEFAULT" is deprecated for removal.  Its value may be silently ignored in the future.
/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:1056: OsloDBDeprecationWarning: EngineFacade is deprecated; please use oslo_db.sqlalchemy.enginefacade
  expire_on_commit=expire_on_commit, _conf=conf)
  
# 验证
mysql glance -uroot -p -e 'show tables;'

3.4 启动服务

# Enable and start both glance services.
for unit in openstack-glance-api.service openstack-glance-registry.service; do
  systemctl enable "$unit"
  systemctl start "$unit"
done

# 查看角色、项目、用户关系 — list which users hold which roles in which projects.
openstack role assignment list

3.5 上传镜像测试

openstack image create "cirros" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --public
+------------------+------------------------------------------------------+
| Field            | Value                                                |
+------------------+------------------------------------------------------+
| checksum         | ee1eca47dc88f4879d8a229cc70a07c6                     |
| container_format | bare                                                 |
| created_at       | 2021-11-05T06:03:36Z                                 |
| disk_format      | qcow2                                                |
| file             | /v2/images/e2f926f4-f466-4239-8444-178a30ae7744/file |
| id               | e2f926f4-f466-4239-8444-178a30ae7744                 |
| min_disk         | 0                                                    |
| min_ram          | 0                                                    |
| name             | cirros                                               |
| owner            | 3a75cae60c7c49b0b3e843e5949975bc                     |
| protected        | False                                                |
| schema           | /v2/schemas/image                                    |
| size             | 13287936                                             |
| status           | active                                               |
| tags             |                                                      |
| updated_at       | 2021-11-05T06:03:37Z                                 |
| virtual_size     | None                                                 |
| visibility       | public                                               |
+------------------+------------------------------------------------------+

[root@controller ~]# glance image-list
+--------------------------------------+--------+
| ID                                   | Name   |
+--------------------------------------+--------+
| e2f926f4-f466-4239-8444-178a30ae7744 | cirros |
+--------------------------------------+--------+

4.nova服务

控制节点安装

nova-api 服务

接收和响应来自最终用户的计算API请求。此服务支持OpenStack计算服务API,Amazon EC2 API,以及特殊的管理API用于赋予用户做一些管理的操作。它会强制实施一些规则,发起多数的编排活动,例如运行一个实例。

nova-api-metadata 服务

接受来自虚拟机发送的元数据请求。服务一般在安装nova-network服务的多主机模式下使用。

一个持续工作的守护进程,通过Hypervior的API来创建和销毁虚拟机实例。例如:

  • XenServer/XCP 的 XenAPI
  • KVM 或 QEMU 的 libvirt
  • VMware 的 VMwareAPI

过程是蛮复杂的。最为基本的,守护进程同意了来自队列的动作请求,转换为一系列的系统命令如启动一个KVM实例,然后,到数据库中更新它的状态。

nova-scheduler 服务拿到一个来自队列请求虚拟机实例,然后决定那台计算服务器主机来运行它。

nova-conductor模块

媒介作用于nova-compute服务与数据库之间。它排除了由nova-compute服务对云数据库的直接访问。nova-conductor模块可以水平扩展。但是,不要将它部署在运行nova-compute服务的主机节点上。

nova-cert模块

服务器守护进程向Nova Cert服务提供X509证书。用来为euca-bundle-image生成证书。仅仅是在EC2 API的请求中使用

nova-network worker 守护进程

与 nova-compute 服务类似,从队列中接受网络任务,并且操作网络。执行任务例如创建桥接的接口或者改变IPtables的规则。

nova-consoleauth 守护进程

授权控制台代理所提供的用户令牌。详情可查看 nova-novncproxy 和 nova-xvpvncproxy。该服务必须为控制台代理运行才可奏效。在集群配置中你可以运行二者中任一代理服务而非仅运行一个nova-consoleauth服务。

nova-novncproxy 守护进程

提供一个代理,用于访问正在运行的实例,通过VNC协议,支持基于浏览器的novnc客户端。

nova-spicehtml5proxy 守护进程

提供一个代理,用于访问正在运行的实例,通过 SPICE 协议,支持基于浏览器的 HTML5 客户端。

nova-xvpvncproxy 守护进程

提供一个代理,用于访问正在运行的实例,通过VNC协议,支持OpenStack特定的Java客户端。

nova-cert 守护进程

X509 证书。

nova客户端

用于用户作为租户管理员或最终用户来提交命令。

队列

一个在守护进程间传递消息的中央集线器。常见实现有 RabbitMQ、ZeroMQ 等 AMQP 消息队列。

SQL数据库

存储构建时和运行时的状态,为云基础设施,包括有:

  • 可用实例类型
  • 使用中的实例
  • 可用网络
  • 项目

理论上,OpenStack计算可以支持任何和SQL-Alchemy所支持的后端数据库,通常使用SQLite3来做测试和开发工作,MySQL和PostgreSQL 用作生产环境。

4.1 创建数据库

mysql -u root -p

CREATE DATABASE nova_api;
CREATE DATABASE nova;

GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
  IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
  IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
  IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
  IDENTIFIED BY 'NOVA_DBPASS';

4.2 创建用户注册api

[root@controller ~]# . admin-openrc
[root@controller ~]# openstack user create --domain default --password-prompt nova
User Password:    # <==== nova
Repeat User Password:
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | 1510ca4a5403403b9bb65719bfdf67fe |
| enabled   | True                             |
| id        | 59b3ca2333324089ad3045d583ef4dbd |
| name      | nova                             |
+-----------+----------------------------------+
[root@controller ~]# openstack role add --project service --user nova admin
[root@controller ~]# openstack service create --name nova --description "OpenStack Compute" compute
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Compute                |
| enabled     | True                             |
| id          | 0fc8b3c4e2ba40bdafa6a21d9ab6eb5b |
| name        | nova                             |
| type        | compute                          |
+-------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field        | Value                                     |
+--------------+-------------------------------------------+
| enabled      | True                                      |
| id           | ed74f11bf10e4371a2361d1773523ab0          |
| interface    | public                                    |
| region       | RegionOne                                 |
| region_id    | RegionOne                                 |
| service_id   | 0fc8b3c4e2ba40bdafa6a21d9ab6eb5b          |
| service_name | nova                                      |
| service_type | compute                                   |
| url          | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field        | Value                                     |
+--------------+-------------------------------------------+
| enabled      | True                                      |
| id           | 52eed6e272ee451ea1f0af4417a2380a          |
| interface    | internal                                  |
| region       | RegionOne                                 |
| region_id    | RegionOne                                 |
| service_id   | 0fc8b3c4e2ba40bdafa6a21d9ab6eb5b          |
| service_name | nova                                      |
| service_type | compute                                   |
| url          | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field        | Value                                     |
+--------------+-------------------------------------------+
| enabled      | True                                      |
| id           | 97b7b8ac4ea84ff096682a91df829175          |
| interface    | admin                                     |
| region       | RegionOne                                 |
| region_id    | RegionOne                                 |
| service_id   | 0fc8b3c4e2ba40bdafa6a21d9ab6eb5b          |
| service_name | nova                                      |
| service_type | compute                                   |
| url          | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+

4.3 安装并配置

# Controller-side nova components (API, conductor, consoles, scheduler).
yum -y install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler

# Back up the stock config, then keep only the meaningful lines.
cp /etc/nova/nova.conf /etc/nova/nova.conf_bak
grep -Ev '^$|^#' /etc/nova/nova.conf_bak > /etc/nova/nova.conf

[root@controller ~]# vi /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 10.0.0.11
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api_database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api

[barbican]
[cache]
[cells]
[cinder]
[conductor]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova

[ephemeral_storage_encryption]
[glance]
api_servers = http://controller:9292

[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova

[libvirt]
[matchmaker_redis]
[metrics]
[neutron]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS

[oslo_middleware]
[oslo_policy]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip

[workarounds]
[xenserver]

# 同步数据库
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage db sync" nova

4.4 启动服务

# Enable and start every controller-side nova service.
for unit in openstack-nova-api.service openstack-nova-consoleauth.service \
    openstack-nova-scheduler.service openstack-nova-conductor.service \
    openstack-nova-novncproxy.service; do
  systemctl enable "$unit"
  systemctl start "$unit"
done

计算节点安装

4.4 安装nova-compute

yum install openstack-nova-compute libvirt-client -y

4.5 配置

cp /etc/nova/nova.conf{,_bak}
grep -Ev '^$|^#' /etc/nova/nova.conf_bak > /etc/nova/nova.conf

vi /etc/nova/nova.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
# 本机的管理IP地址
my_ip = 10.0.0.31
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api_database]
[barbican]
[cache]
[cells]
[cinder]
[conductor]
[cors]
[cors.subdomain]
[database]
[ephemeral_storage_encryption]
[glance]
api_servers = http://controller:9292

[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova

[libvirt]
[matchmaker_redis]
[metrics]
[neutron]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS

[oslo_middleware]
[oslo_policy]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = $my_ip
# vnc接口的地址如果是主机名那么客户机windows需要添加hosts解析控制台才不会报错。
# 如果不更改客户机hosts,那么使用控制节点IP地址即可。
novncproxy_base_url = http://controller:6080/vnc_auto.html

[workarounds]
[xenserver]


# 如果返回为0需要进行下面的设置;结合后面创建的虚拟机来看,这里不配置会导致虚拟机开机启动不起来,不同7系列版本可能会不一致。
egrep -c '(vmx|svm)' /proc/cpuinfo

vi /etc/nova/nova.conf
[libvirt]
...
virt_type = qemu
cpu_mode = none

4.6 启动服务

systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service

4.7 验证

. admin-openrc
openstack compute service list
+----+------------------+------------+----------+---------+-------+----------------------------+
| Id | Binary           | Host       | Zone     | Status  | State | Updated At                 |
+----+------------------+------------+----------+---------+-------+----------------------------+
|  1 | nova-scheduler   | controller | internal | enabled | up    | 2021-11-05T06:58:35.000000 |
|  2 | nova-conductor   | controller | internal | enabled | up    | 2021-11-05T06:58:29.000000 |
|  3 | nova-consoleauth | controller | internal | enabled | up    | 2021-11-05T06:58:30.000000 |
|  7 | nova-compute     | compute1   | nova     | enabled | up    | 2021-11-05T06:58:27.000000 |
+----+------------------+------------+----------+---------+-------+----------------------------+

5.安装neutron网络服务

控制节点安装

5.1 配置数据库

mysql -u root -p

CREATE DATABASE neutron;

GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
  IDENTIFIED BY 'NEUTRON_DBPASS';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
  IDENTIFIED BY 'NEUTRON_DBPASS';

5.2 创建用户并注册endpoint

[root@controller ~]# openstack user create --domain default --password-prompt neutron
User Password:    # <==== neutron
Repeat User Password:
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | 1510ca4a5403403b9bb65719bfdf67fe |
| enabled   | True                             |
| id        | d9151535769a4bb386550a2b466f13f1 |
| name      | neutron                          |
+-----------+----------------------------------+

[root@controller ~]# openstack role add --project service --user neutron admin

[root@controller ~]# openstack service create --name neutron --description "OpenStack Networking" network
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Networking             |
| enabled     | True                             |
| id          | 4f17e060610e4f00b51bbdb78d5941fb |
| name        | neutron                          |
| type        | network                          |
+-------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne network public http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 4bcdd9a9e33f4e7bbd6025a2157dbf42 |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 4f17e060610e4f00b51bbdb78d5941fb |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne network internal http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | cd78e2cb2dec4e15af526fe840abe6e3 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 4f17e060610e4f00b51bbdb78d5941fb |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne network admin http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 2e4a8c6e0c7b4e178007c9ee2f3f60a0 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 4f17e060610e4f00b51bbdb78d5941fb |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+

# 验证
[root@controller ~]# openstack endpoint list|grep network
| 63d6214c80f24a9ba7068b9e31d2d9b0 | RegionOne | neutron      | network      | True    | admin     | http://controller:9696                    |
| 74d918942dac446ebcb177558a4d361d | RegionOne | neutron      | network      | True    | public    | http://controller:9696                    |
| a2685cde3ffc4916be6aa970adfbd0cd | RegionOne | neutron      | network      | True    | internal  | http://controller:9696         

5.3 安装&配置

controller 配置

yum install openstack-neutron openstack-neutron-ml2 \
  openstack-neutron-linuxbridge ebtables -y

cp /etc/neutron/neutron.conf{,_bak}
grep -Ev '^#|^$' /etc/neutron/neutron.conf_bak > /etc/neutron/neutron.conf

[root@controller ~]# cat /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins =
rpc_backend = rabbit
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True

[agent]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[matchmaker_redis]
[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS

[oslo_policy]
[quotas]
[ssl]


cp /etc/neutron/plugins/ml2/ml2_conf.ini{,_bak}
grep -Ev '^#|^$' /etc/neutron/plugins/ml2/ml2_conf.ini_bak > /etc/neutron/plugins/ml2/ml2_conf.ini

[root@controller ~]# cat /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[ml2]
type_drivers = flat,vlan
tenant_network_types =
mechanism_drivers = linuxbridge
extension_drivers = port_security

[ml2_type_flat]
flat_networks = provider

[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
[ml2_type_vxlan]
[securitygroup]
enable_ipset = True

# ====== 配置二层网络几个重要的参数 ======
type_drivers = flat,vlan      # flat 扁平化网络,vlan
tenant_network_types =        # 三层网络使用,为空表示禁用
mechanism_drivers = linuxbridge    # 桥接 
flat_networks = provider      # 二层网络的名称 provider


cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,_bak}
grep -Ev '^$|^#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini_bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[root@controller ~]# cat /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:eth0

[securitygroup]
# 打开安全组,控制安全组的驱动
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

[vxlan]
enable_vxlan = False

# ====== physical_interface_mappings = provider:eth0 ======    
# 这里的网络与上面的相对应,桥接到物理网卡eth0上面。


[root@controller ~]# cat /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True

# 生成 metadata_proxy_shared_secret,取值任意,只要与 nova.conf 中 [neutron] 段配置的值保持一致即可
openssl rand -hex 10  

[root@controller ~]# cat /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_ip = controller
metadata_proxy_shared_secret = d4e44a1b239c039806c9


vim /etc/nova/nova.conf
。。。。。。
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron

service_metadata_proxy = True
metadata_proxy_shared_secret = d4e44a1b239c039806c9


ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
 # ====== 此处省略 =====
INFO  [alembic.runtime.migration] Running upgrade 2a16083502f3 -> 2e5352a0ad4d, Add missing foreign keys
INFO  [alembic.runtime.migration] Running upgrade 2e5352a0ad4d -> 11926bcfe72d, add geneve ml2 type driver
INFO  [alembic.runtime.migration] Running upgrade 11926bcfe72d -> 4af11ca47297, Drop cisco monolithic tables
INFO  [alembic.runtime.migration] Running upgrade 4af11ca47297 -> 1b294093239c, Drop embrane plugin table
INFO  [alembic.runtime.migration] Running upgrade 1b294093239c, 32e5974ada25 -> 8a6d8bdae39, standardattributes migration
INFO  [alembic.runtime.migration] Running upgrade 8a6d8bdae39 -> 2b4c2465d44b, DVR sheduling refactoring
INFO  [alembic.runtime.migration] Running upgrade 2b4c2465d44b -> e3278ee65050, Drop NEC plugin tables
INFO  [alembic.runtime.migration] Running upgrade e3278ee65050, 15e43b934f81 -> c6c112992c9, rbac_qos_policy
INFO  [alembic.runtime.migration] Running upgrade c6c112992c9 -> 5ffceebfada, network_rbac_external
INFO  [alembic.runtime.migration] Running upgrade 5ffceebfada, 0e66c5227a8a -> 4ffceebfcdc, standard_desc
  OK

systemctl restart openstack-nova-api.service

5.4 启动验证

systemctl enable neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service
  
systemctl start neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service

# 需要等待一段时间
[root@controller ~]# neutron agent-list
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host       | availability_zone | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+
| 354633ca-ebec-4b90-84dd-339e4b55144e | Linux bridge agent | controller |                   | :-)   | True           | neutron-linuxbridge-agent |
| 50c0fc56-79eb-42fe-97ae-887333618568 | DHCP agent         | controller | nova              | :-)   | True           | neutron-dhcp-agent        |
| e004250b-c51a-44b4-89e9-091f11a0a4da | Metadata agent     | controller |                   | :-)   | True           | neutron-metadata-agent    |
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+

计算节点安装

5.5 安装配置

yum install openstack-neutron-linuxbridge ebtables ipset -y

cp /etc/neutron/neutron.conf{,_bak}
grep -Ev '^#|^$' /etc/neutron/neutron.conf_bak > /etc/neutron/neutron.conf

[root@compute1 ~]# cat>/etc/neutron/neutron.conf<<EOF
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone

[agent]
[cors]
[cors.subdomain]
[database]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS

[oslo_policy]
[quotas]
[ssl]
EOF

cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,_bak}
grep -Ev '^$|^#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini_bak  >/etc/neutron/plugins/ml2/linuxbridge_agent.ini

vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:eth0
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

[vxlan]
enable_vxlan = False

如果 OpenStack 宿主机是 VMware ESXi 创建的虚拟机,则必须在 ESXi 侧进行设置:在 vSwitch(或端口组)的安全策略中允许"混杂模式"(Promiscuous Mode)。

这个配置主要是让 ESXi 创建的虚拟机的网卡可以进入混杂模式。

如果没有这个设置,那么就会导致 ESXi 创建的虚拟机网卡不能分发包到 OpenStack 创建的虚拟机。

vi /etc/nova/nova.conf
...
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
...

5.6 启动

systemctl restart openstack-nova-compute.service

systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service

[root@controller ~]# neutron agent-list
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host       | availability_zone | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+
| 354633ca-ebec-4b90-84dd-339e4b55144e | Linux bridge agent | controller |                   | :-)   | True           | neutron-linuxbridge-agent |
| 50c0fc56-79eb-42fe-97ae-887333618568 | DHCP agent         | controller | nova              | :-)   | True           | neutron-dhcp-agent        |
| 7af96de4-c3da-4773-bc23-b77cd896231f | Linux bridge agent | compute1   |                   | :-)   | True           | neutron-linuxbridge-agent |
| e004250b-c51a-44b4-89e9-091f11a0a4da | Metadata agent     | controller |                   | :-)   | True           | neutron-metadata-agent    |
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+

6.安装dashboard

在控制节点

6.1 安装

 yum install openstack-dashboard -y

6.2 配置

实质是在更改一个django的配置文件。

# 找到下面的配置并更改
vi /etc/openstack-dashboard/local_settings

# 在 controller 节点上配置仪表盘以使用 OpenStack 服务:
OPENSTACK_HOST = "controller"

# 允许所有主机访问仪表板
ALLOWED_HOSTS = ['*', ]

# 配置 memcached 会话存储服务
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': 'controller:11211',
    }
}

# 启用第3版认证API
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

# 启用对域的支持
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

# 配置API版本
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 2,
}

# 通过仪表盘创建用户时的默认域配置为 default
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"

# 通过仪表盘创建的用户默认角色配置为 user 
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

# 如果您选择网络参数1,禁用支持3层网络服务
OPENSTACK_NEUTRON_NETWORK = {
    ...
    'enable_router': False,
    'enable_quotas': False,
    'enable_distributed_router': False,
    'enable_ha_router': False,
    'enable_lb': False,
    'enable_firewall': False,
    'enable_vpn': False,
    'enable_fip_topology_check': False,
}

# 配置时区
TIME_ZONE = "UTC"

vi /etc/httpd/conf.d/openstack-dashboard.conf
# 第四行添加
WSGIApplicationGroup %{GLOBAL}

6.3 启动

systemctl restart httpd.service memcached.service

# 访问地址 
http://10.0.0.11/dashboard
用户名:admin
密码:admin

7.命令行启动实例

7.1 创建网络

[root@controller ~]# . admin-openrc
[root@controller ~]# neutron net-create --shared --provider:physical_network provider --provider:network_type flat provider
Created a new network:
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | True                                 |
| availability_zone_hints   |                                      |
| availability_zones        |                                      |
| created_at                | 2021-11-06T07:18:02                  |
| description               |                                      |
| id                        | e54cd98e-df99-411d-9a5b-5b5537ea6464 |
| ipv4_address_scope        |                                      |
| ipv6_address_scope        |                                      |
| mtu                       | 1500                                 |
| name                      | provider                             |
| port_security_enabled     | True                                 |
| provider:network_type     | flat                                 |
| provider:physical_network | provider                             |
| provider:segmentation_id  |                                      |
| router:external           | False                                |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   |                                      |
| tags                      |                                      |
| tenant_id                 | 3a75cae60c7c49b0b3e843e5949975bc     |
| updated_at                | 2021-11-06T07:18:02                  |
+---------------------------+--------------------------------------+

--shared    # 共享网络,其它用户或者项目可以使用。
--provider:physical_network provider    #使用物理网络 provider 与neutron配置文件中ini文件中的对应。
--provider:network_type flat    # 网络类型 flat,特点:flat型网络一般独占一个物理网卡。
provider           # 网络名称

7.2 创建子网

neutron subnet-create --name provider-net \
  --allocation-pool start=START_IP_ADDRESS,end=END_IP_ADDRESS \
  --dns-nameserver DNS_RESOLVER --gateway PROVIDER_NETWORK_GATEWAY \
  provider PROVIDER_NETWORK_CIDR
  
START_IP_ADDRESS      # 开始的第一个IP
END_IP_ADDRESS        # 结束最后一个IP地址
DNS_RESOLVER          # dns地址
PROVIDER_NETWORK_GATEWAY    # 网关地址
provider              # 与这个网络关联
PROVIDER_NETWORK_CIDR       # 类似192.168.0.0/24

# 因为桥接的是 eth0 网卡,所以创建的子网使用与宿主机 eth0 相同的网段(10.0.0.0/24),这样云主机就可以通过 eth0 上网。
[root@controller ~]# neutron subnet-create --name provider-net \
   --allocation-pool start=10.0.0.150,end=10.0.0.160 \
   --dns-nameserver 223.5.5.5 --gateway 10.0.0.1 \
   provider 10.0.0.0/24
   
Created a new subnet:
+-------------------+----------------------------------------------+
| Field             | Value                                        |
+-------------------+----------------------------------------------+
| allocation_pools  | {"start": "10.0.0.150", "end": "10.0.0.160"} |
| cidr              | 10.0.0.0/24                                  |
| created_at        | 2021-11-06T07:30:06                          |
| description       |                                              |
| dns_nameservers   | 223.5.5.5                                    |
| enable_dhcp       | True                                         |
| gateway_ip        | 10.0.0.1                                     |
| host_routes       |                                              |
| id                | b3643d73-0925-485e-bcce-c22e6e4c2d3b         |
| ip_version        | 4                                            |
| ipv6_address_mode |                                              |
| ipv6_ra_mode      |                                              |
| name              | provider-net                                 |
| network_id        | e54cd98e-df99-411d-9a5b-5b5537ea6464         |
| subnetpool_id     |                                              |
| tenant_id         | 3a75cae60c7c49b0b3e843e5949975bc             |
| updated_at        | 2021-11-06T07:30:06                          |
+-------------------+----------------------------------------------+

7.3 创建实例模板

有默认的虚拟机模板

[root@controller ~]# openstack flavor list
+----+-----------+-------+------+-----------+-------+-----------+
| ID | Name      |   RAM | Disk | Ephemeral | VCPUs | Is Public |
+----+-----------+-------+------+-----------+-------+-----------+
| 1  | m1.tiny   |   512 |    1 |         0 |     1 | True      |
| 2  | m1.small  |  2048 |   20 |         0 |     1 | True      |
| 3  | m1.medium |  4096 |   40 |         0 |     2 | True      |
| 4  | m1.large  |  8192 |   80 |         0 |     4 | True      |
| 5  | m1.xlarge | 16384 |  160 |         0 |     8 | True      |
+----+-----------+-------+------+-----------+-------+-----------+

# 如果要创建
[root@controller ~]# openstack flavor create --id 0 --vcpus 1 --ram 128 --disk 5 my_create
+----------------------------+-----------+
| Field                      | Value     |
+----------------------------+-----------+
| OS-FLV-DISABLED:disabled   | False     |
| OS-FLV-EXT-DATA:ephemeral  | 0         |
| disk                       | 5         |
| id                         | 0         |
| name                       | my_create |
| os-flavor-access:is_public | True      |
| ram                        | 128       |
| rxtx_factor                | 1.0       |
| swap                       |           |
| vcpus                      | 1         |
+----------------------------+-----------+

7.4 创建密钥对

密钥对可以无密码直接ssh连接虚拟机,如果创建了会在创建实例的时候注入。

ssh-keygen -q -N "" -f ~/.ssh/id_rsa
[root@controller ~]# openstack keypair create --public-key ~/.ssh/id_rsa.pub my_ssh_key
+-------------+-------------------------------------------------+
| Field       | Value                                           |
+-------------+-------------------------------------------------+
| fingerprint | 44:59:f7:cb:05:70:8b:4b:e1:50:e8:36:4f:bf:51:f4 |
| name        | my_ssh_key                                      |
| user_id     | c4d4838b317840c1b54ba1549a1010be                |
+-------------+-------------------------------------------------+

7.5 创建安全组规则

默认情况下应该是可以ping通的,如果不行考虑添加。

[root@controller ~]# openstack security group rule create --proto icmp default
+-----------------------+--------------------------------------+
| Field                 | Value                                |
+-----------------------+--------------------------------------+
| id                    | af241acf-296b-428d-b368-529ec09c9dfe |
| ip_protocol           | icmp                                 |
| ip_range              | 0.0.0.0/0                            |
| parent_group_id       | 15b8498c-eb91-47a2-8f7e-004c93d4803b |
| port_range            |                                      |
| remote_security_group |                                      |
+-----------------------+--------------------------------------+
[root@controller ~]# openstack security group rule create --proto tcp --dst-port 22 default
+-----------------------+--------------------------------------+
| Field                 | Value                                |
+-----------------------+--------------------------------------+
| id                    | 73ada95f-81ff-42b1-95c9-e732d2f6480b |
| ip_protocol           | tcp                                  |
| ip_range              | 0.0.0.0/0                            |
| parent_group_id       | 15b8498c-eb91-47a2-8f7e-004c93d4803b |
| port_range            | 22:22                                |
| remote_security_group |                                      |
+-----------------------+--------------------------------------+

7.6 启动实例

[root@controller ~]# openstack flavor list
+----+-----------+-------+------+-----------+-------+-----------+
| ID | Name      |   RAM | Disk | Ephemeral | VCPUs | Is Public |
+----+-----------+-------+------+-----------+-------+-----------+
| 0  | my_create |   128 |    5 |         0 |     1 | True      |
| 1  | m1.tiny   |   512 |    1 |         0 |     1 | True      |
| 2  | m1.small  |  2048 |   20 |         0 |     1 | True      |
| 3  | m1.medium |  4096 |   40 |         0 |     2 | True      |
| 4  | m1.large  |  8192 |   80 |         0 |     4 | True      |
| 5  | m1.xlarge | 16384 |  160 |         0 |     8 | True      |
+----+-----------+-------+------+-----------+-------+-----------+

[root@controller ~]# openstack image list
+--------------------------------------+--------+--------+
| ID                                   | Name   | Status |
+--------------------------------------+--------+--------+
| e2f926f4-f466-4239-8444-178a30ae7744 | cirros | active |
+--------------------------------------+--------+--------+

[root@controller ~]# neutron net-list
+--------------------------------------+----------+--------------------------------------------------+
| id                                   | name     | subnets                                          |
+--------------------------------------+----------+--------------------------------------------------+
| e54cd98e-df99-411d-9a5b-5b5537ea6464 | provider | b3643d73-0925-485e-bcce-c22e6e4c2d3b 10.0.0.0/24 |
+--------------------------------------+----------+--------------------------------------------------+

# 创建虚拟机
openstack server create --flavor my_create --image cirros \
  --nic net-id=e54cd98e-df99-411d-9a5b-5b5537ea6464 --security-group default \
  --key-name my_ssh_key cirros-tt
  
+--------------------------------------+-----------------------------------------------+
| Field                                | Value                                         |
+--------------------------------------+-----------------------------------------------+
| OS-DCF:diskConfig                    | MANUAL                                        |
| OS-EXT-AZ:availability_zone          |                                               |
| OS-EXT-SRV-ATTR:host                 | None                                          |
| OS-EXT-SRV-ATTR:hypervisor_hostname  | None                                          |
| OS-EXT-SRV-ATTR:instance_name        | instance-00000001                             |
| OS-EXT-STS:power_state               | 0                                             |
| OS-EXT-STS:task_state                | scheduling                                    |
| OS-EXT-STS:vm_state                  | building                                      |
| OS-SRV-USG:launched_at               | None                                          |
| OS-SRV-USG:terminated_at             | None                                          |
| accessIPv4                           |                                               |
| accessIPv6                           |                                               |
| addresses                            |                                               |
| adminPass                            | HL964yvkPF7n                                  |
| config_drive                         |                                               |
| created                              | 2021-11-06T07:50:12Z                          |
| flavor                               | my_create (0)                                 |
| hostId                               |                                               |
| id                                   | 0b5e4f81-e811-44f7-b149-eec8465fa841          |
| image                                | cirros (e2f926f4-f466-4239-8444-178a30ae7744) |
| key_name                             | my_ssh_key                                    |
| name                                 | cirros-tt                                     |
| os-extended-volumes:volumes_attached | []                                            |
| progress                             | 0                                             |
| project_id                           | 3a75cae60c7c49b0b3e843e5949975bc              |
| properties                           |                                               |
| security_groups                      | [{u'name': u'default'}]                       |
| status                               | BUILD                                         |
| updated                              | 2021-11-06T07:50:13Z                          |
| user_id                              | c4d4838b317840c1b54ba1549a1010be              |
+--------------------------------------+-----------------------------------------------+

[root@controller ~]# openstack server list
+--------------------------------------+-----------+--------+---------------------+
| ID                                   | Name      | Status | Networks            |
+--------------------------------------+-----------+--------+---------------------+
| 64a7c56c-039f-47b4-be73-1561e3a1c3d5 | cirros-tt | ACTIVE | provider=10.0.0.153 |
+--------------------------------------+-----------+--------+---------------------+

# 实例的控制台访问地址
[root@controller ~]# openstack console url show cirros-tt
+-------+---------------------------------------------------------------------------------+
| Field | Value                                                                           |
+-------+---------------------------------------------------------------------------------+
| type  | novnc                                                                           |
| url   | http://controller:6080/vnc_auto.html?token=5306cd0e-fc1f-4481-a5e3-0ce45642f76a |
+-------+---------------------------------------------------------------------------------+

# 这里显示的controller 是控制节点的主机名,如果在windows中没有配置解析会访问不了。解决方法
方法一: 配置hosts解析
方法二:修改计算节点nova.conf配置文件
vi /etc/nova/nova.conf
... ... ...
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = $my_ip
novncproxy_base_url = http://10.0.0.11:6080/vnc_auto.html

systemctl restart openstack-nova-compute.service

推荐阅读