[root@minikube ~]# minikube start --driver=docker
[root@minikube ~]# minikube config set driver docker
[root@minikube ~]# minikube kubectl -- get po -A
[root@minikube ~]# kubectl get po -A
[root@controller ~]# cp kuryr-kubernetes/devstack/local.conf.sample devstack/local.conf
[root@controller ~]# cat devstack/local.conf
# Credentials
ADMIN_PASSWORD=tera123
DATABASE_PASSWORD=tera123
RABBIT_PASSWORD=tera123
SERVICE_PASSWORD=tera123
SERVICE_TOKEN=tera123
# Enable Keystone v3
IDENTITY_API_VERSION=3
# In pro of speed and being lightweight, we will be explicit in regards to
# which services we enable
SERVICE_HOST=192.168.4.220
MYSQL_HOST=192.168.4.220
RABBIT_HOST=192.168.4.220
GLANCE_HOSTPORT=192.168.4.220:9292
Q_HOST=192.168.4.220
KEYSTONE_AUTH_HOST=192.168.4.220
KEYSTONE_SERVICE_HOST=192.168.4.220
CEILOMETER_BACKEND=mongodb
DATABASE_TYPE=mysql
ENABLED_SERVICES=n-cpu,n-net,n-api-meta,c-vol
[root@controller ~]# vi devstack/stack.sh
SUPPORTED_DISTROS="bionic|focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel7"
if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
if [[ "$FORCE" != "yes" ]]; then
die $LINENO "If you wish to run this script anyway run with FORCE=yes"
fi
fi
6.11 执行Devstack脚本
执行DevStack安装脚本,脚本执行完成后会输出安装信息以及操作系统的版本信息。
[root@controller ~]# ./devstack/stack.sh
DevStack Component Timing
(times are in seconds)
=========================
run_process 28
test_with_retry 4
apt-get-update 17
async_wait 0
osc 482
wait_for_service 14
dbsync 77
pip_install 98
apt-get 9
-------------------------
Unaccounted time 951
=========================
Total runtime 1680
This is your host IP address: 192.168.4.220
This is your host IPv6 address: ::1
Keystone is serving at http://192.168.4.220/identity/
The default users are: admin and demo
The password: tera123
Services are running under systemd unit files.
For more information see:
https://docs.openstack.org/devstack/latest/systemd.html
DevStack Version: wallaby
Change: 83821a11ac1d6738b63cb10878b8aaa02e153374 Merge "Address feedback from glance-remote patch" 2021-03-23 16:56:21 +0000
OS Version: CentOS Linux release 7.8.2003 (Core)
7 配置OpenStack-ironic
7.1 上传Deploy Images(控制节点)
镜像通过disk-image-create命令构建。
[root@controller ~]# glance image-create --name deploy-vmlinuz --visibility public --disk-format aki --container-format aki < coreos_production_pxe.vmlinuz
[root@controller ~]# glance image-create --name deploy-initrd --visibility public --disk-format ari --container-format ari < coreos_production_pxe_image-oem.cpio.gz
[root@controller ~]# glance image-create --name my-image.vmlinuz --visibility public --disk-format aki --container-format aki < my-image.vmlinuz
[root@controller ~]# glance image-create --name my-image.initrd --visibility public --disk-format ari --container-format ari < my-image.initrd
[root@controller ~]# export MY_VMLINUZ_UUID=$(openstack image list | awk '/my-image.vmlinuz/ { print $2 }')
[root@controller ~]# export MY_INITRD_UUID=$(openstack image list | awk '/my-image.initrd/ { print $2 }')
[root@controller ~]# glance image-create --name my-image --visibility \
public --disk-format qcow2 --container-format bare --property \
kernel_id=$MY_VMLINUZ_UUID --property ramdisk_id=$MY_INITRD_UUID < my-image.qcow2
为了OpenStack-Helm项目的快速部署、验证与研究,本次部署采用AIO(All in one)模式,因此需要注意的是,若要安装所有的功能模块至少保证服务器的内存资源不小于16G,否则可能会导致部署失败。
3.3 实施部署前的环境准备
进行实施部署前的环境准备工作,包括系统基础环境和网络代理配置。
配置Sudo免密:
noone@noone-virtual-machine:~$ sudo chmod +w /etc/sudoers
noone@noone-virtual-machine:~$ sudo vim /etc/sudoers
noone@noone-virtual-machine:~$ sudo cat /etc/sudoers
#
# This file MUST be edited with the 'visudo' command as root.
#
# Please consider adding local content in /etc/sudoers.d/ instead of
# directly modifying this file.
#
# See the man page for details on how to write a sudoers file.
#
Defaults env_reset
Defaults mail_badpass
Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
# Host alias specification
# User alias specification
# Cmnd alias specification
# User privilege specification
root ALL=(ALL:ALL) ALL
# Members of the admin group may gain root privileges
%admin ALL=(ALL) ALL
# Allow members of group sudo to execute any command
%sudo ALL=(ALL:ALL) ALL
# See sudoers(5) for more information on "#include" directives:
noone ALL=(ALL) NOPASSWD:ALL
#includedir /etc/sudoers.d
noone@noone-virtual-machine:~$
配置Ubuntu的软件源列表、更新系统中的软件包:
noone@noone-virtual-machine:~$ cd /etc/apt/
noone@noone-virtual-machine:/etc/apt$ cat sources.list
deb http://mirrors.163.com/ubuntu/ bionic main restricted universe multiverse
deb http://mirrors.163.com/ubuntu/ bionic-security main restricted universe multiverse
deb http://mirrors.163.com/ubuntu/ bionic-updates main restricted universe multiverse
deb http://mirrors.163.com/ubuntu/ bionic-proposed main restricted universe multiverse
deb http://mirrors.163.com/ubuntu/ bionic-backports main restricted universe multiverse
deb-src http://mirrors.163.com/ubuntu/ bionic main restricted universe multiverse
deb-src http://mirrors.163.com/ubuntu/ bionic-security main restricted universe multiverse
deb-src http://mirrors.163.com/ubuntu/ bionic-updates main restricted universe multiverse
deb-src http://mirrors.163.com/ubuntu/ bionic-proposed main restricted universe multiverse
deb-src http://mirrors.163.com/ubuntu/ bionic-backports main restricted universe multiverse
noone@noone-virtual-machine:/etc/apt$ sudo apt update
noone@noone-virtual-machine:/etc/apt$ sudo apt upgrade
noone@noone-virtual-machine:~$ sudo vim /etc/privoxy/config
......
# Suppose you are running Privoxy on an IPv6-capable machine and
# you want it to listen on the IPv6 address of the loopback
# device:
#
# listen-address [::1]:8118
#
listen-address 127.0.0.1:8118
listen-address [::1]:8118
#
......
# To chain Privoxy and Tor, both running on the same system, you
# would use something like:
#
# forward-socks5t / 127.0.0.1:9050 .
#
注释:
此处的配置将本地8118端口接收到的流量转发到192.168.0.60:10808,该端口由局域网中的代理软件提供
forward-socks5 / 192.168.0.60:10808 .
#
# Note that if you got Tor through one of the bundles, you may
# have to change the port from 9050 to 9150 (or even another
# one). For details, please check the documentation on the Tor
# website.
#
# The public Tor network can't be used to reach your local
# network, if you need to access local servers you therefore
# might want to make some exceptions:
#
# forward 192.168.*.*/ .
# forward 10.*.*.*/ .
# forward 127.*.*.*/ .
#
forward 192.168.*.*/ .
forward 10.*.*.*/ .
forward 127.*.*.*/ .
forward 172.24.*.*/ .
#
......
# Examples:
#
# forwarded-connect-retries 1
#
forwarded-connect-retries 1
#
......
noone@noone-virtual-machine:/tmp$ wget www.google.com
--2021-03-24 10:21:45-- http://www.google.com/
Connecting to 127.0.0.1:8118... connected.
Proxy request sent, awaiting response... 200 OK
Length: 12823 (13K) [text/html]
Saving to: ‘index.html’
index.html 100%[=====>] 12.52K --.-KB/s in 0s
2021-03-24 10:21:47 (187 MB/s) - ‘index.html’ saved [12823/12823]
noone@noone-virtual-machine:/tmp$
修改NSSwitch配置文件指定行:
noone@noone-virtual-machine:~$ cat /etc/nsswitch.conf
# /etc/nsswitch.conf
#
# Example configuration of GNU Name Service Switch functionality.
# If you have the `glibc-doc-reference' and `info' packages installed, try:
# `info libc "Name Service Switch"' for information about this file.
……
hosts: files dns
……
noone@noone-virtual-machine:~$
[root@controller ~]# mysql -uroot -p
MariaDB [(none)]> CREATE DATABASE zun;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON zun.* TO 'zun'@'localhost' \
IDENTIFIED BY 'ZUN_PASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON zun.* TO 'zun'@'%' \
IDENTIFIED BY 'ZUN_PASS';
# 创建并配置管理网的网桥,并将智能网卡的管理网IP放到此网桥上
root@OCTEONTX:~# ovs-vsctl add-br br-m -- set bridge br-m datapath_type=netdev
root@OCTEONTX:~# ip add del dev eth4 192.168.5.45/24
root@OCTEONTX:~# ovs-vsctl add-port br-m eth4
root@OCTEONTX:~# ip link set dev br-m up
root@OCTEONTX:~# ip add add dev br-m 192.168.5.45/24
root@OCTEONTX:~# ip route add default via 192.168.5.1 dev br-m
# 创建并配置业务网的网桥,将智能网卡的物理网口eth0连接到此网桥上
root@OCTEONTX:~# ovs-vsctl add-br br-net -- set bridge br-net datapath_type=netdev
root@OCTEONTX:~# ovs-vsctl add-port br-net eth0 -- set Interface eth0 type=dpdk options:dpdk-devargs=0002:02:00.0
root@OCTEONTX:~# ip link set dev br-net up
4.1.5 在宿主机侧创建两台虚拟机,连接到智能网卡侧的业务网桥
# 将vhost-ec2004Y驱动上传到宿主机,并在screen中保持运行状态。
[root@asterfusion ~]# screen -S vhost-ec96
[root@asterfusion ~]# ./vhost-ec2004Y -l0-7 -w 0000:03:02.0 -w 0000:03:02.1 -- -s /tmp/vhost-user-0.sock -s /tmp/vhost-user-1.sock -c --mergeable 1 --stats 1 -t 0
使用组合键Ctrl + A,然后再使用Ctrl + D将screen保存到后台继续运行。
[root@asterfusion ~]# ls -lh /tmp/vhost-user-*
srwxrwxr-x 1 qemu qemu 0 Aug 23 14:33 /tmp/vhost-user-0.sock
srwxrwxr-x 1 qemu qemu 0 Aug 23 15:08 /tmp/vhost-user-1.sock
# 修改虚拟机的xml配置文件,添加一个vhost-user的虚拟网卡。
# centos-00:
<domain type='kvm'>
<name>centos-00</name>
<devices>
<emulator>/usr/libexec/qemu-kvm</emulator>
……
<interface type='vhostuser'>
<mac address='52:54:00:c7:ac:00'/>
<source type='unix' path='/tmp/vhost-user-0.sock' mode='server'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x03' slot='0x02' function='0x0'/>
</interface>
……
</devices>
</domain>
# centos-01:
<domain type='kvm'>
<name>centos-01</name>
<devices>
<emulator>/usr/libexec/qemu-kvm</emulator>
……
<interface type='vhostuser'>
<mac address='52:54:00:c7:ac:01'/>
<source type='unix' path='/tmp/vhost-user-1.sock' mode='server'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
……
</devices>
</domain>
# 创建两台CentOS7虚拟机并启动。
[root@asterfusion ~]# virsh define centos-00.xml
[root@asterfusion ~]# virsh define centos-01.xml
[root@asterfusion ~]# virsh start centos-00
[root@asterfusion ~]# virsh start centos-01
[root@asterfusion ~]# virsh list --all
Id Name State
----------------------------------------------------
13 centos-00 running
14 centos-01 running
# 将虚拟机连接到宿主机侧的管理网桥。
[root@asterfusion ~]# ip link add centos-00-m type veth peer name centos-00-m-s
[root@asterfusion ~]# ip link add centos-01-m type veth peer name centos-01-m-s
[root@asterfusion ~]# ovs-vsctl add-br br-m
[root@asterfusion ~]# ip link set dev br-m up
[root@asterfusion ~]# ip add add dev br-m 192.168.5.145/24
[root@asterfusion ~]# ip route add default via 192.168.5.1 dev br-m
[root@asterfusion ~]# ovs-vsctl add-port br-m centos-00-m-s
[root@asterfusion ~]# ovs-vsctl add-port br-m centos-01-m-s
[root@asterfusion ~]# virsh attach-interface centos-00 --type direct --source centos-00-m --config
[root@asterfusion ~]# virsh attach-interface centos-00 --type direct --source centos-00-m --live
[root@asterfusion ~]# virsh attach-interface centos-01 --type direct --source centos-01-m --config
[root@asterfusion ~]# virsh attach-interface centos-01 --type direct --source centos-01-m --live
[root@asterfusion ~]# ip link set dev centos-00-m up
[root@asterfusion ~]# ip link set dev centos-01-m up
[root@asterfusion ~]# ip link set dev centos-00-m-s up
[root@asterfusion ~]# ip link set dev centos-01-m-s up
# 分别给两台虚拟机配置业务IP。
# centos-00:
[root@centos-00 ~]# ip link set dev eth0 up
[root@centos-00 ~]# ip add add dev eth0 172.0.0.100/24
# centos-01:
[root@centos-01 ~]# ip link set dev eth0 up
[root@centos-01 ~]# ip add add dev eth0 172.0.0.200/24
# 分别给两台虚拟机配置管理IP。
# centos-00:
[root@centos-00 ~]# ip link set dev eth1 up
[root@centos-00 ~]# ip add add dev eth1 192.168.5.155/24
[root@centos-00 ~]# ip route add default via 192.168.5.1 dev eth1
# centos-01:
[root@centos-01 ~]# ip link set dev eth1 up
[root@centos-01 ~]# ip add add dev eth1 192.168.5.165/24
[root@centos-01 ~]# ip route add default via 192.168.5.1 dev eth1
# 在智能网卡侧将虚拟机使用的两个VF绑定到业务网桥br-net。
root@OCTEONTX:~# ovs-vsctl add-port br-net sdp1 -- set Interface sdp1 type=dpdk options:dpdk-devargs=0002:0f:00.2 mtu_request=9000
root@OCTEONTX:~# ovs-vsctl add-port br-net sdp2 -- set Interface sdp2 type=dpdk options:dpdk-devargs=0002:0f:00.3 mtu_request=9000
4.1.6 验证宿主机侧两台虚拟机的连通性
# 经过验证两台虚拟机能够经过智能网卡侧的网桥br-net正常通信。
# centos-00:
[root@centos-00 ~]# ping 172.0.0.200 -c 4
PING 172.0.0.200 (172.0.0.200) 56(84) bytes of data.
64 bytes from 172.0.0.200: icmp_seq=1 ttl=64 time=0.220 ms
64 bytes from 172.0.0.200: icmp_seq=2 ttl=64 time=0.164 ms
64 bytes from 172.0.0.200: icmp_seq=3 ttl=64 time=0.140 ms
64 bytes from 172.0.0.200: icmp_seq=4 ttl=64 time=0.132 ms
--- 172.0.0.200 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3000ms
rtt min/avg/max/mdev = 0.132/0.164/0.220/0.034 ms
[root@centos-00 ~]#
# centos-01:
[root@centos-01 ~]# ping 172.0.0.100 -c 4
PING 172.0.0.100 (172.0.0.100) 56(84) bytes of data.
64 bytes from 172.0.0.100: icmp_seq=1 ttl=64 time=0.159 ms
64 bytes from 172.0.0.100: icmp_seq=2 ttl=64 time=0.163 ms
64 bytes from 172.0.0.100: icmp_seq=3 ttl=64 time=0.179 ms
64 bytes from 172.0.0.100: icmp_seq=4 ttl=64 time=0.180 ms
--- 172.0.0.100 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 2999ms
rtt min/avg/max/mdev = 0.159/0.170/0.180/0.013 ms
[root@centos-01 ~]#
# 将卸载SSL的容器前端IP设为10.0.0.50/24,后端IP设为172.0.0.50/24。
root@OCTEONTX:~# ip link add nginx-host type veth peer name nginx-ovs
root@OCTEONTX:~# ip link set dev nginx-host up
root@OCTEONTX:~# ip link set dev nginx-ovs up
root@OCTEONTX:~# ip link add proxy-host type veth peer name proxy-ovs
root@OCTEONTX:~# ip link set dev proxy-host up
root@OCTEONTX:~# ip link set dev proxy-ovs up
root@OCTEONTX:~# ovs-vsctl add-port br-net proxy-host
root@OCTEONTX:~# ovs-vsctl add-port br-net proxy-ovs
root@OCTEONTX:~# ovs-vsctl add-br Proxy-B -- set bridge Proxy-B datapath_type=netdev
root@OCTEONTX:~# ovs-vsctl add-br Proxy-F -- set bridge Proxy-F datapath_type=netdev
root@OCTEONTX:~# ovs-vsctl add-port Proxy-B nginx-host
root@OCTEONTX:~# ovs-vsctl add-port Proxy-F proxy-host
root@OCTEONTX:~# ip link set dev Proxy-B up
root@OCTEONTX:~# ip link set dev Proxy-F up
root@OCTEONTX:~# ip add add dev Proxy-B 172.0.0.50/24
root@OCTEONTX:~# ip add add dev Proxy-F 10.0.0.50/24
[root@controller ~]# openstack compute service list
7.4 验证Endpoint
[root@controller ~]# openstack endpoint list
7.5 验证Neutron
[root@controller ~]# openstack network agent list
8 部署Ironic(控制节点、Bare Metal)
8.1 MySQL中创建库和权限(控制节点)
在数据库中创建Ironic库,并且授予用户访问库的权限。
[root@localhost ~]# mysql -uroot -p
MariaDB [(none)]> CREATE DATABASE ironic CHARACTER SET utf8;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON ironic.* TO 'ironic'@'localhost' identified by 'tera123';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON ironic.* TO 'ironic'@'%' identified by 'tera123';
[root@controller ~]# glance image-create --name deploy-vmlinuz --visibility public --disk-format aki --container-format aki < coreos_production_pxe.vmlinuz
[root@controller ~]# glance image-create --name deploy-initrd --visibility public --disk-format ari --container-format ari < coreos_production_pxe_image-oem.cpio.gz
[root@controller ~]# openstack server create --flavor my-baremetal-flavor \
--nic net-id=3793d3bd-5a26-4dd2-a637-007b8ed7c2b0 \
--image 08e111be-d256-4c43-bb07-ea65a1219f77 test
13.2 验证结果
[root@controller ~]# openstack server list
[root@controller ~]# openstack baremetal port show eb97e31d-5200-4f1a-beef-75a1c91cc1b6
[root@controller ~]# openstack port show baed8d7c-b6fc-48e8-8cc8-b16b7b55d4a2
[root@controller ~]# ssh root@192.168.4.236
14 常见问题
14.1 获取swift_temp_url报错
MissingAuthPlugin: An auth plugin is required to determine endpoint URL。因为我们选择了Direct的部署方式,裸机服务器的IPA会从Swift Object Storage将User Image拉到本地,在裸机端完成镜像注入,但是我们环境中没有部署Swift,所以需要改为ISCSI的部署方式。
PXE-E51: No DHCP or proxyDHCP offers were received。这是在为裸机MAC地址分配IP地址时发生的报错,主要原因是DHCP服务器与裸机网络不通,需要确保DHCP服务器与裸机之间可以通信。检查Neutron的配置文件,测试Provisioning网络的可用性。
[root@bare ~]# vi /etc/ironic/ironic.conf
[neutron]
cleaning_network=3793d3bd-5a26-4dd2-a637-007b8ed7c2b0
cleaning_network_security_groups=42263b5f-cdff-4374-a04c-506cc22eee70
provisioning_network=3793d3bd-5a26-4dd2-a637-007b8ed7c2b0
provisioning_network_security_groups=42263b5f-cdff-4374-a04c-506cc22eee70
14.6 获取Deploy Image报错
ERROR while preparing to deploy to node : MissingAuthPlugin: An auth plugin is required to determine endpoint URL,这是一个Bug。获取Image的时候MissingAuthPlugin是因为没有配置Glance。
[root@localhost ~]# mysql -uroot -p
MariaDB [(none)]> create database keystone;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' identified by 'tera123';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' identified by 'tera123';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'openstack' identified by 'tera123';
[root@localhost ~]# openstack project create --domain default \
--description "Service Project" service
[root@localhost ~]# openstack role create user
8.7 验证Keystone
[root@localhost ~]# openstack user list
+----------------------------------+-------+
| ID                               | Name  |
+----------------------------------+-------+
| 57341d3e37eb4dc997624f9502495e44 | admin |
+----------------------------------+-------+
[root@localhost ~]# mysql -uroot -p
MariaDB [(none)]> create database glance;
MariaDB [(none)]>GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' identified by 'tera123';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' identified by 'tera123';
MariaDB [(none)]>GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'openstack' identified by 'tera123';
[root@localhost ~]# mysql -uroot -p
MariaDB [(none)]> create database nova_api;
MariaDB [(none)]> create database nova;
MariaDB [(none)]> create database nova_cell0;
MariaDB [(none)]> create database placement;
MariaDB[(none)]>GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' identified by 'tera123';
MariaDB[(none)]>GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' identified by 'tera123';
MariaDB[(none)]>GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'openstack' identified by 'tera123';
MariaDB[(none)]>GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' identified by 'tera123';
MariaDB[(none)]>GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' identified by 'tera123';
MariaDB[(none)]>GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'openstack' identified by 'tera123';
MariaDB[(none)]>GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' identified by 'tera123';
MariaDB[(none)]>GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' identified by 'tera123';
MariaDB[(none)]>GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'openstack' identified by 'tera123';
MariaDB[(none)]>GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' identified by 'tera123';
MariaDB[(none)]>GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' identified by 'tera123';
MariaDB[(none)]>GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'openstack' identified by 'tera123';
[root@localhost ~]# vi /etc/httpd/conf.d/00-nova-placement-api.conf
Listen 8778
<VirtualHost *:8778>
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
WSGIProcessGroup nova-placement-api
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
WSGIDaemonProcess nova-placement-api processes=3 threads=1 user=nova group=nova
WSGIScriptAlias / /usr/bin/nova-placement-api
<IfVersion >= 2.4>
ErrorLogFormat "%M"
</IfVersion>
ErrorLog /var/log/nova/nova-placement-api.log
#SSLEngine On
#SSLCertificateFile ...
#SSLCertificateKeyFile ...
</VirtualHost>
Alias /nova-placement-api /usr/bin/nova-placement-api
<Location /nova-placement-api>
SetHandler wsgi-script
Options +ExecCGI
WSGIProcessGroup nova-placement-api
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
</Location>
10.6 同步数据库
导入nova-api、nova、cell0、placement库SQL。
[root@localhost ~]# su -s /bin/sh -c "nova-manage api_db sync" nova
[root@localhost ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
[root@localhost ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
[root@localhost ~]# su -s /bin/sh -c "nova-manage db sync" nova
[root@localhost ~]# openstack compute service list --service nova-compute
[root@localhost ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
Found 2 cell mappings.
Skipping cell0 since it does not contain hosts.
Getting computes from cell 'cell1': 54e6c270-7390-4390-8702-02b72874c5a7
Checking host mapping for compute host 'compute': 39d80423-6001-4036-a546-5287c1e93ec5
Creating host mapping for compute host 'compute': 39d80423-6001-4036-a546-5287c1e93ec5
Found 1 unmapped computes in cell: 54e6c270-7390-4390-8702-02b72874c5a7
OpenStack Networking plug-ins and agents:OpenStack Networking的常见代理包括L3代理、DHCP代理以及插件代理
12.1 MySQL中创建库和权限
在数据库中创建Neutron用户,并且授予用户访问库的权限。
[root@localhost ~]# mysql -uroot -p
MariaDB [(none)]> create database neutron;
MariaDB[(none)]>GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' identified by 'tera123';
MariaDB[(none)]>GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' identified by 'tera123';
MariaDB[(none)]>GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'openstack' identified by 'tera123';
ERROR oslo.messaging._drivers.impl_rabbit [-] [94fc1201-373b-451f-a9e2-46fc81b5de20] AMQP server on controller:5672 is unreachable: [Errno 111] ECONNREFUSED. Trying again in 24 seconds.: error: [Errno 111] ECONNREFUSED
ERROR oslo_db.sqlalchemy.engines [-] Database connection was found disconnected; reconnecting: DBConnectionError: (pymysql.err.OperationalError) (2013, 'Lost connection to MySQL server during query') [SQL: u'SELECT 1'] (Background on this error at: http://sqlalche.me/e/e3q8)
[root@localhost ~]# vi /etc/my.cnf
[mysqld]
wait_timeout=28800
7.2 新增节点报错
ERROR nova.scheduler.client.report [req-fb678c94-091f-4dd3-bd44-49068015a07e - - - - -] [req-a7dd9b65-4ef2-4822-8110-f4a311839683] Failed to create resource provider record in placement API for UUID 73542ad1-f32b-4ba8-a62c-98ec704234c3. Got 409: {"errors": [{"status": 409, "request_id": "req-a7dd9b65-4ef2-4822-8110-f4a311839683", "detail": "There was a conflict when trying to complete your request.\n\n Conflicting resource provider name: compute already exists. ", "title": "Conflict"}]}.
[root@localhost ~]# mysql -uroot -p
MariaDB [nova_api]> update resource_providers set uuid='4d9ed4b4-f3a2-4e5d-9d8e-2f657a844a04' where name='compute' and uuid='e131e7c4-f7db-4889-8c34-e750e7b129da';