OpenStack超级架构-1OpenStack部分
【摘要】 OpenStack部分主机规划主机名IPcq-kz-h-controller1-0-10.ang.local192.168.0.10cq-kz-h-controller2-0-11.ang.local192.168.0.20cq-kz-h-lvsvip1-0-12.ang.local192.168.0.30cq-kz-h-lvsvip2-0-13.ang.local192.168.0.40c...
OpenStack部分
主机规划
主机名 | IP |
---|---|
cq-kz-h-controller1-0-10.xier.local | 192.168.0.10 |
cq-kz-h-controller2-0-20.xier.local | 192.168.0.20 |
cq-kz-h-lvsvip1-0-30.xier.local | 192.168.0.30 |
cq-kz-h-lvsvip2-0-40.xier.local | 192.168.0.40 |
cq-kz-h-compute1-0-50.xier.local | 192.168.0.50 |
cq-kz-h-compute2-0-60.xier.local | 192.168.0.60 |
更改主机名与关闭防火墙
更改主机名
地区----机房简称----虚拟机(h)/物理机(v)----业务名称---ip地址---域名后缀 例子:cq-kz-v-taobao-lvs-0-100.xier.local hostnamectl set-hostname cq-kz-h-template-0-5.xier.local
关闭防火墙与selinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config; systemctl stop firewalld; systemctl disable firewalld;setenforce 0
配置免密
sshpass -p000000 ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.0.20 "-o StrictHostKeyChecking=no"
安装基本工具
yum install vim iotop bc gcc gcc-c++ glibc glibc-devel pcre pcre-devel openssl openssl-devel zip unzip zlib-devel net-tools lrzsz tree telnet lsof tcpdump wget libevent libevent-devel bc systemd-devel bash-completion traceroute bridge-utils -y
配置hosts解析
cat /etc/hosts 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 192.168.0.10 cq-kz-h-controller1-0-10.xier.local cq-kz-h-controller1-0-10 192.168.0.20 cq-kz-h-controller2-0-20.xier.local cq-kz-h-controller2-0-20 192.168.0.50 cq-kz-h-lvs1-0-50.xier.local cq-kz-h-lvs1-0-50 192.168.0.60 cq-kz-h-lvs2-0-60.xier.local cq-kz-h-lvs2-0-60 192.168.0.30 cq-kz-h-compute1-0-30.xier.local cq-kz-h-compute1-0-30 192.168.0.40 cq-kz-h-compute2-0-40.xier.local cq-kz-h-compute2-0-40 192.168.0.100 openstack-vip.xier.local
配置时间服务器
controller1节点
dnf install -y chrony vi /etc/chrony.conf server ntp6.aliyun.com iburst allow all local stratum 10 systemctl restart chronyd hwclock -w
其它所有节点
dnf install -y chrony vi /etc/chrony.conf server 192.168.0.10 iburst systemctl restart chronyd hwclock -w
controller所有节点MySQL主主架构
controller1与controller2节点
mysql下载地址:https://downloads.mysql.com/archives/community/
wget https://downloads.mysql.com/archives/get/p/23/file/mysql-5.7.26-linux-glibc2.12-x86_64.tar.gz tar xf mysql-5.7.26-linux-glibc2.12-x86_64.tar.gz useradd -s /sbin/nologin mysql mkdir /application/mysql -pv mkdir /data/mysql/data -pv mkdir /data/mysql/binlog -pv echo "PATH=/application/mysql/bin:$PATH" > /etc/profile.d/mysql.sh source /etc/profile.d/mysql.sh mv mysql-5.7.26-linux-glibc2.12-x86_64/* /application/mysql/ mysqld --initialize-insecure --user=mysql --basedir=/application/mysql --datadir=/data/mysql/data vim /etc/my.cnf [mysqld] user=mysql basedir=/application/mysql datadir=/data/mysql/data socket=/tmp/mysql.sock server_id=6 # data2设置为8,不一样即可 port=3306 log_bin=/data/mysql/binlog/mysql-bin character_set_server=utf8 [mysql] socket=/tmp/mysql.sock chown -R mysql.mysql /data/ cp /application/mysql/support-files/mysql.server /etc/init.d/mysqld service mysqld start yum install libncurses* -y mysql grant replication slave on *.* to repl@'%' identified by '123';
controller1与controller2节点互相操作
mysql> show master status; +------------------+----------+--------------+------------------+-------------------+ | File | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set | +------------------+----------+--------------+------------------+-------------------+ | mysql-bin.000001 | 437 | | | | +------------------+----------+--------------+------------------+-------------------+ help change master to; CHANGE MASTER TO MASTER_HOST='192.168.0.20', # 注意填写对方节点的IP以及对方show master status输出的值 MASTER_USER='repl', MASTER_PASSWORD='123', MASTER_PORT=3306, MASTER_LOG_FILE='mysql-bin.000002', MASTER_LOG_POS=154; START SLAVE; SHOW SLAVE STATUS\G # 确认 Slave_IO_Running 与 Slave_SQL_Running 均为 Yes
部署rabbitmq与memcached
controller所有节点
dnf install -y centos-release-openstack-victoria sed -i -e "s|mirrorlist=|#mirrorlist=|g" -e "s|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g" -e 's|http://vault.centos.org/[^/]*/[^/]*/|https://mirrors.aliyun.com/centos-vault/8.5.2111/|g' /etc/yum.repos.d/*
由于源的问题,使用华为源即可解决不能安装rabbitmq问题
dnf install wget -y wget -O /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-8-reg.repo dnf install -y rabbitmq-server memcached
controller1与controller2
systemctl enable --now rabbitmq-server
controller1
scp /var/lib/rabbitmq/.erlang.cookie root@192.168.0.20:/var/lib/rabbitmq/.erlang.cookie
controller2
systemctl restart rabbitmq-server
controller1与controller2
rabbitmq-plugins enable rabbitmq_management
controller2
# 1.停止服务 rabbitmqctl stop_app # 2.重置状态 rabbitmqctl reset # 3.节点加入(加入controller1集群,使用其短主机名) rabbitmqctl join_cluster rabbit@cq-kz-h-controller1-0-10 # 4.重启服务 rabbitmqctl start_app 解除集群 # 1.停止服务 rabbitmqctl stop_app # 2.重置集群状态 rabbitmqctl reset # 3.重启服务 rabbitmqctl start_app
controller1与controller2
vim /etc/sysconfig/memcached PORT="11211" USER="memcached" MAXCONN="2048" CACHESIZE="1024" OPTIONS="-l 0.0.0.0,::1" systemctl enable --now memcached
controller1
rabbitmqctl add_user openstack openstack123 rabbitmqctl set_permissions openstack ".*" ".*" ".*"
lvsvip所有节点代理
配置邮件报警
lvsvip1与lvsvip2节点
dnf install -y ipvsadm keepalived vim /etc/keepalived/keepalived.conf ! Configuration File for keepalived global_defs { notification_email { acassen@firewall.loc failover@firewall.loc sysadmin@firewall.loc } notification_email_from Alexandre.Cassen@firewall.loc smtp_server 127.0.0.1 smtp_connect_timeout 30 router_id cq-kz-h-lvsvip2-0-60.ang.local # 这里更改为自己的主机名 vrrp_skip_check_adv_addr vrrp_strict vrrp_garp_interval 0 vrrp_gna_interval 0 } include /etc/keepalived/conf.d/*.conf mkdir /etc/keepalived/conf.d/ vim /etc/keepalived/conf.d/mail.conf vrrp_instance mail { state BACKUP interface ens33 virtual_router_id 66 priority 100 # lvsvip2设置为80 advert_int 1 nopreempt virtual_ipaddress { 192.168.0.100/24 dev ens33 label ens33:1 } notify_master "/etc/keepalived/notify.sh master" notify_backup "/etc/keepalived/notify.sh backup" notify_fault "/etc/keepalived/notify.sh fault" } vim /etc/keepalived/notify.sh #!/bin/bash contact='2443599212@qq.com' notify() { mailsubject="$(hostname) to be $1, vip floating" # 发送标题 mailbody="$(date +'%F %T'): vrrp transition, $(hostname) changed to be $1" # 发送内容 echo "$mailbody" | mail -s "$mailsubject" $contact } case $1 in master) notify master ;; backup) notify backup ;; fault) notify fault ;; *) echo "Usage: $(basename $0) {master|backup|fault}" exit 1 ;; esac chmod +x /etc/keepalived/notify.sh dnf install -y mailx vim /etc/mail.rc set from=2443599212@qq.com set smtp=smtp.qq.com set smtp-auth-user=2443599212@qq.com set smtp-auth-password=rqqwhmnmumasebhe systemctl restart keepalived
配置lvs高可用并代理MySQL
controller1与controller2节点配置lvs环境
vim /root/lvs-dr-rs.sh #!/bin/bash # LVS DR默认初始化脚本 LVS_VIP=10.0.0.100/32 DEV=lo:1 # source /etc/rc.d/init.d/functions case "$1" in start) /sbin/ifconfig lo:0 $LVS_VIP netmask 255.255.255.255 # broadcast $LVS_VIP # /sbin/route add -host $LVS_VIP dev lo:0 echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce sysctl -p > /dev/null 2>&1 echo "RealServer RS Start OK" ;; stop) /sbin/ifconfig lo:0 down # /sbin/route del $LVS_VIP > /dev/null 2>&1 echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce sysctl -p > /dev/null 2>&1 echo "RealServer RS Stoped" ;; *) echo "Usage: $0 {start|stop}" exit 1 esac exit 0 # 启动脚本 bash lvs-dr-rs.sh start
lvsvip1与lvsvip2配置代理MySQL
# 需要一个传输层做铺垫 vim /etc/keepalived/conf.d/http.conf virtual_server 10.0.0.100 80 { delay_loop 6 lb_algo wrr lb_kind DR protocol TCP sorry_server 127.0.0.1 80 real_server 10.0.0.10 80 { weight 100 TCP_CHECK { connect_timeout 5 nb_get_retry 3 delay_before_retry 3 connect_port 80 } } real_server 10.0.0.11 80 { weight 1 TCP_CHECK { connect_timeout 5 nb_get_retry 3 delay_before_retry 3 connect_port 80 } } } vim /etc/keepalived/conf.d/mysql.conf virtual_server 10.0.0.100 3306 { delay_loop 3 lb_algo wrr lb_kind DR protocol TCP real_server 10.0.0.10 3306 { weight 10 HTTP_GET { connect_timeout 1 nb_get_retry 3 delay_before_retry 1 } } real_server 10.0.0.11 3306 { weight 1 HTTP_GET { connect_timeout 1 nb_get_retry 3 delay_before_retry 1 } } }
lvsvip1与lvsvip2配置代理rabbitmq
cd /etc/keepalived/conf.d vim rabbitmq.conf virtual_server 10.0.0.100 5672 { delay_loop 3 lb_algo wrr lb_kind DR protocol TCP real_server 10.0.0.10 5672 { weight 10 HTTP_GET { connect_timeout 1 nb_get_retry 3 delay_before_retry 1 } } real_server 10.0.0.11 5672 { weight 1 HTTP_GET { connect_timeout 1 nb_get_retry 3 delay_before_retry 1 } } } # web vim rabbitmq_dashboard.conf virtual_server 10.0.0.100 15672 { delay_loop 3 lb_algo wrr lb_kind DR protocol TCP real_server 10.0.0.10 15672 { weight 10 HTTP_GET { connect_timeout 1 nb_get_retry 3 delay_before_retry 1 } } real_server 10.0.0.11 15672 { weight 1 HTTP_GET { connect_timeout 1 nb_get_retry 3 delay_before_retry 1 } } }
lvsvip1与lvsvip2配置代理memcache
cd /etc/keepalived/conf.d cp rabbitmq.conf memcache.conf vim memcache.conf virtual_server 10.0.0.100 11211 { delay_loop 3 lb_algo wrr lb_kind DR protocol TCP real_server 10.0.0.10 11211 { weight 10 HTTP_GET { connect_timeout 1 nb_get_retry 3 delay_before_retry 1 } } real_server 10.0.0.11 11211 { weight 1 HTTP_GET { connect_timeout 1 nb_get_retry 3 delay_before_retry 1 } } }
部署keystone(controller1)
controller1创库授权
mysql CREATE DATABASE keystone; GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone123';
controller1安装openstack相关软件
dnf install -y centos-release-openstack-victoria dnf install -y python3-openstackclient openstack-selinux crudini dnf install openstack-keystone httpd python3-mod_wsgi python3-PyMySQL python3-memcached -y
编辑配置文件
cp /etc/keystone/keystone.conf{,.bak} grep -Ev '^$|#' /etc/keystone/keystone.conf.bak > /etc/keystone/keystone.conf crudini --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:keystone123@openstack-vip.xier.local/keystone crudini --set /etc/keystone/keystone.conf token provider fernet
同步数据库
su -s /bin/sh -c "keystone-manage db_sync" keystone # 检查 mysql -ukeystone -pkeystone123 -e "use keystone; show tables"
初始化 Fernet 密钥存储库
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
初始化keystone
keystone-manage bootstrap --bootstrap-password admin \ --bootstrap-admin-url http://openstack-vip.xier.local:5000/v3/ \ --bootstrap-internal-url http://openstack-vip.xier.local:5000/v3/ \ --bootstrap-public-url http://openstack-vip.xier.local:5000/v3/ \ --bootstrap-region-id RegionOne
配置apache HTTP服务
vim /etc/httpd/conf/httpd.conf ServerName 192.168.0.10
创建软链接
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
启动服务
systemctl enable --now httpd.service
创建环境变量文件
cat > /etc/keystone/admin-openrc << EOF export OS_PROJECT_DOMAIN_NAME=Default export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_NAME=admin export OS_USERNAME=admin export OS_PASSWORD=admin export OS_AUTH_URL=http://openstack-vip.xier.local:5000/v3 export OS_IDENTITY_API_VERSION=3 export OS_IMAGE_API_VERSION=2 EOF
配置keystone的lvs规则代理并验证
lvsvip1
cd /etc/keepalived/conf.d cp mysql.conf keystone.conf sed -i "s/3306/5000/g" keystone.conf systemctl reload keepalived
创建域、项目
openstack domain create --description "An Example Domain" example openstack project create --domain default --description "Service Project" service
controller1验证
unset OS_AUTH_URL OS_PASSWORD # 输入密码admin openstack --os-auth-url http://openstack-vip.xier.local:5000/v3 \ --os-project-domain-name Default --os-user-domain-name Default \ --os-project-name admin --os-username admin token issue
部署glance
创库授权
mysql CREATE DATABASE glance; GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance123';
创建用户、角色、glance实体、镜像API
openstack user create --domain default --password glance glance openstack role add --project service --user glance admin openstack service create --name glance --description "OpenStack Image" image openstack endpoint create --region RegionOne image public http://openstack-vip.xier.local:9292 openstack endpoint create --region RegionOne image internal http://openstack-vip.xier.local:9292 openstack endpoint create --region RegionOne image admin http://openstack-vip.xier.local:9292
安装相关软件包
dnf install openstack-glance -y
编辑配置文件
cp /etc/glance/glance-api.conf{,.bak} grep -Ev '^$|#' /etc/glance/glance-api.conf.bak > /etc/glance/glance-api.conf crudini --set /etc/glance/glance-api.conf database connection mysql+pymysql://glance:glance123@openstack-vip.xier.local/glance crudini --set /etc/glance/glance-api.conf keystone_authtoken www_authenticate_uri http://openstack-vip.xier.local:5000 crudini --set /etc/glance/glance-api.conf keystone_authtoken auth_url http://openstack-vip.xier.local:5000 crudini --set /etc/glance/glance-api.conf keystone_authtoken auth_type password crudini --set /etc/glance/glance-api.conf keystone_authtoken project_domain_name Default crudini --set /etc/glance/glance-api.conf keystone_authtoken user_domain_name Default crudini --set /etc/glance/glance-api.conf keystone_authtoken project_name service crudini --set /etc/glance/glance-api.conf keystone_authtoken username glance crudini --set /etc/glance/glance-api.conf keystone_authtoken password glance crudini --set /etc/glance/glance-api.conf paste_deploy flavor keystone crudini --set /etc/glance/glance-api.conf glance_store stores file,http crudini --set /etc/glance/glance-api.conf glance_store default_store file crudini --set /etc/glance/glance-api.conf glance_store filesystem_store_datadir /var/lib/glance/images/
同步数据库
su -s /bin/sh -c "glance-manage db_sync" glance
启动服务
systemctl enable --now openstack-glance-api.service
配置glance的lvs规则代理并验证
lvsvip1
cd /etc/keepalived/conf.d cp mysql.conf glance.conf sed -i "s/3306/9292/g" glance.conf systemctl reload keepalived
controller1验证
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img glance image-create --name "cirros" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --visibility=public openstack image list
部署placement
创库授权
mysql CREATE DATABASE placement; GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'placement123';
创建用户、角色、API
openstack user create --domain default --password placement placement openstack role add --project service --user placement admin openstack service create --name placement --description "Placement API" placement openstack endpoint create --region RegionOne placement public http://openstack-vip.xier.local:8778 openstack endpoint create --region RegionOne placement internal http://openstack-vip.xier.local:8778 openstack endpoint create --region RegionOne placement admin http://openstack-vip.xier.local:8778
安装软件包
yum install openstack-placement-api -y
编辑配置文件
cp /etc/placement/placement.conf{,.bak} grep -Ev "^$|#" /etc/placement/placement.conf.bak > /etc/placement/placement.conf crudini --set /etc/placement/placement.conf placement_database connection mysql+pymysql://placement:placement123@openstack-vip.xier.local/placement crudini --set /etc/placement/placement.conf api auth_strategy keystone crudini --set /etc/placement/placement.conf keystone_authtoken memcached_servers openstack-vip.xier.local:11211 crudini --set /etc/placement/placement.conf keystone_authtoken auth_url http://openstack-vip.xier.local:5000/v3 crudini --set /etc/placement/placement.conf keystone_authtoken auth_type password crudini --set /etc/placement/placement.conf keystone_authtoken project_domain_name Default crudini --set /etc/placement/placement.conf keystone_authtoken user_domain_name Default crudini --set /etc/placement/placement.conf keystone_authtoken project_name service crudini --set /etc/placement/placement.conf keystone_authtoken username placement crudini --set /etc/placement/placement.conf keystone_authtoken password placement cat >> /etc/httpd/conf.d/00-placement-api.conf << EOF <Directory /usr/bin> <IfVersion >= 2.4> Require all granted </IfVersion> <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> </Directory> EOF
同步数据库
su -s /bin/sh -c "placement-manage db sync" placement
启动服务
systemctl restart httpd
配置placement的lvs规则代理并验证
lvsvip1
cp /etc/keepalived/conf.d/mysql.conf /etc/keepalived/conf.d/placement.conf sed -i "s/3306/8778/g" /etc/keepalived/conf.d/placement.conf systemctl reload keepalived
controller1验证
placement-status upgrade check
部署nova
controller1节点
创库授权
CREATE DATABASE nova_api; CREATE DATABASE nova; CREATE DATABASE nova_cell0; GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova123'; GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova123'; GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
创建nova服务凭证
openstack user create --domain default --password nova nova openstack role add --project service --user nova admin openstack service create --name nova --description "OpenStack Compute" compute openstack endpoint create --region RegionOne compute public http://openstack-vip.xier.local:8774/v2.1 openstack endpoint create --region RegionOne compute internal http://openstack-vip.xier.local:8774/v2.1 openstack endpoint create --region RegionOne compute admin http://openstack-vip.xier.local:8774/v2.1
安装软件包
dnf install -y openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler
编辑配置文件
cp /etc/nova/nova.conf{,.bak} grep -Ev "^$|#" /etc/nova/nova.conf.bak > /etc/nova/nova.conf crudini --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata crudini --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:nova123@openstack-vip.xier.local/nova_api crudini --set /etc/nova/nova.conf database connection mysql+pymysql://nova:nova123@openstack-vip.xier.local/nova crudini --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:openstack123@openstack-vip.xier.local:5672/ crudini --set /etc/nova/nova.conf api auth_strategy keystone crudini --set /etc/nova/nova.conf keystone_authtoken www_authenticate_uri http://openstack-vip.xier.local:5000/ crudini --set /etc/nova/nova.conf keystone_authtoken auth_url http://openstack-vip.xier.local:5000/ crudini --set /etc/nova/nova.conf keystone_authtoken memcached_servers openstack-vip.xier.local:11211 crudini --set /etc/nova/nova.conf keystone_authtoken auth_type password crudini --set /etc/nova/nova.conf keystone_authtoken project_domain_name Default crudini --set /etc/nova/nova.conf keystone_authtoken user_domain_name Default crudini --set /etc/nova/nova.conf keystone_authtoken project_name service crudini --set /etc/nova/nova.conf keystone_authtoken username nova crudini --set /etc/nova/nova.conf keystone_authtoken password nova crudini --set /etc/nova/nova.conf DEFAULT my_ip 10.0.0.10 crudini --set /etc/nova/nova.conf vnc enabled true crudini --set /etc/nova/nova.conf vnc server_listen '$my_ip' crudini --set /etc/nova/nova.conf vnc server_proxyclient_address '$my_ip' crudini --set /etc/nova/nova.conf glance api_servers http://openstack-vip.xier.local:9292 crudini --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp crudini --set /etc/nova/nova.conf placement region_name RegionOne crudini --set /etc/nova/nova.conf placement project_domain_name Default crudini --set /etc/nova/nova.conf placement project_name service crudini --set /etc/nova/nova.conf placement 
auth_type password crudini --set /etc/nova/nova.conf placement user_domain_name Default crudini --set /etc/nova/nova.conf placement auth_url http://openstack-vip.xier.local:5000/v3 crudini --set /etc/nova/nova.conf placement username placement crudini --set /etc/nova/nova.conf placement password placement
同步数据库
su -s /bin/sh -c "nova-manage api_db sync" nova su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova su -s /bin/sh -c "nova-manage db sync" nova # 验证 su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
启动服务
systemctl enable --now openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
配置nova的lvs规则代理并验证
lvsvip1
cp /etc/keepalived/conf.d/mysql.conf /etc/keepalived/conf.d/vnc.conf sed -i "s/3306/6080/g" /etc/keepalived/conf.d/vnc.conf cp /etc/keepalived/conf.d/mysql.conf /etc/keepalived/conf.d/nova.conf sed -i "s/3306/8774/g" /etc/keepalived/conf.d/nova.conf systemctl reload keepalived
compute1节点
安装软件包
dnf install -y centos-release-openstack-victoria dnf install -y crudini wget wget -O /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-8-reg.repo dnf install -y openstack-nova-compute
编辑配置文件
cp /etc/nova/nova.conf{,.bak} grep -Ev "^$|#" /etc/nova/nova.conf.bak > /etc/nova/nova.conf crudini --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata crudini --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:openstack123@openstack-vip.xier.local crudini --set /etc/nova/nova.conf api auth_strategy keystone crudini --set /etc/nova/nova.conf keystone_authtoken www_authenticate_uri http://openstack-vip.xier.local:5000/ crudini --set /etc/nova/nova.conf keystone_authtoken auth_url http://openstack-vip.xier.local:5000/ crudini --set /etc/nova/nova.conf keystone_authtoken memcached_servers openstack-vip.xier.local:11211 crudini --set /etc/nova/nova.conf keystone_authtoken auth_type password crudini --set /etc/nova/nova.conf keystone_authtoken project_domain_name Default crudini --set /etc/nova/nova.conf keystone_authtoken user_domain_name Default crudini --set /etc/nova/nova.conf keystone_authtoken project_name service crudini --set /etc/nova/nova.conf keystone_authtoken username nova crudini --set /etc/nova/nova.conf keystone_authtoken password nova crudini --set /etc/nova/nova.conf DEFAULT my_ip 10.0.0.14 crudini --set /etc/nova/nova.conf vnc enabled true crudini --set /etc/nova/nova.conf vnc server_listen 0.0.0.0 crudini --set /etc/nova/nova.conf vnc server_proxyclient_address '$my_ip' crudini --set /etc/nova/nova.conf vnc novncproxy_base_url http://openstack-vip.xier.local:6080/vnc_auto.html crudini --set /etc/nova/nova.conf glance api_servers http://openstack-vip.xier.local:9292 crudini --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp crudini --set /etc/nova/nova.conf placement region_name RegionOne crudini --set /etc/nova/nova.conf placement project_domain_name Default crudini --set /etc/nova/nova.conf placement project_name service crudini --set /etc/nova/nova.conf placement auth_type password crudini --set /etc/nova/nova.conf placement user_domain_name Default crudini --set /etc/nova/nova.conf placement 
auth_url http://openstack-vip.xier.local:5000/v3 crudini --set /etc/nova/nova.conf placement username placement crudini --set /etc/nova/nova.conf placement password placement
启动服务
systemctl enable --now libvirtd.service openstack-nova-compute.service
controller1主机发现
配置主机发现
openstack compute service list --service nova-compute su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova crudini --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300 systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
部署neutron
创库授权
CREATE DATABASE neutron; GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron123';
创建用户、角色、API
openstack user create --domain default --password neutron neutron openstack role add --project service --user neutron admin openstack service create --name neutron --description "OpenStack Networking" network openstack endpoint create --region RegionOne network public http://openstack-vip.xier.local:9696 openstack endpoint create --region RegionOne network internal http://openstack-vip.xier.local:9696 openstack endpoint create --region RegionOne network admin http://openstack-vip.xier.local:9696
安装软件
dnf install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
编辑配置文件
cp /etc/neutron/neutron.conf{,.bak} grep -Ev "^$|#" /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf crudini --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:neutron123@openstack-vip.xier.local/neutron crudini --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2 crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins router crudini --set /etc/neutron/neutron.conf DEFAULT allow_overlapping_ips true crudini --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:openstack123@openstack-vip.xier.local crudini --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone crudini --set /etc/neutron/neutron.conf keystone_authtoken www_authenticate_uri http://openstack-vip.xier.local:5000 crudini --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://openstack-vip.xier.local:5000 crudini --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers openstack-vip.xier.local:11211 crudini --set /etc/neutron/neutron.conf keystone_authtoken auth_type password crudini --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default crudini --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default crudini --set /etc/neutron/neutron.conf keystone_authtoken project_name service crudini --set /etc/neutron/neutron.conf keystone_authtoken username neutron crudini --set /etc/neutron/neutron.conf keystone_authtoken password neutron crudini --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes true crudini --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes true crudini --set /etc/neutron/neutron.conf nova auth_url http://openstack-vip.xier.local:5000 crudini --set /etc/neutron/neutron.conf nova auth_type password crudini --set /etc/neutron/neutron.conf nova project_domain_name default crudini --set /etc/neutron/neutron.conf nova user_domain_name default crudini --set /etc/neutron/neutron.conf nova region_name RegionOne crudini 
--set /etc/neutron/neutron.conf nova project_name service crudini --set /etc/neutron/neutron.conf nova username nova crudini --set /etc/neutron/neutron.conf nova password nova crudini --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak} grep -Ev "^$|#" /etc/neutron/plugins/ml2/ml2_conf.ini.bak > /etc/neutron/plugins/ml2/ml2_conf.ini crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan,vxlan crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers linuxbridge,l2population crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks extnet crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges 1:1000 crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset true cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak} grep -Ev "^$|#" /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings extnet:eth0 crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan true crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip 10.0.0.10 crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population true crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true cat >> /etc/sysctl.conf << EOF net.bridge.bridge-nf-call-iptables = 1 net.bridge.bridge-nf-call-ip6tables = 1 EOF modprobe br_netfilter sysctl -p cp /etc/neutron/l3_agent.ini{,.bak} grep -Ev "^$|#" /etc/neutron/l3_agent.ini.bak > /etc/neutron/l3_agent.ini crudini --set /etc/neutron/l3_agent.ini DEFAULT interface_driver linuxbridge 
cp /etc/neutron/dhcp_agent.ini{,.bak} grep -Ev "^$|#" /etc/neutron/dhcp_agent.ini.bak > /etc/neutron/dhcp_agent.ini crudini --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver linuxbridge crudini --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata true cp /etc/neutron/metadata_agent.ini{,.bak} grep -Ev "^$|#" /etc/neutron/metadata_agent.ini.bak > /etc/neutron/metadata_agent.ini crudini --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_host 10.0.0.10 crudini --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret ang crudini --set /etc/nova/nova.conf neutron auth_url http://openstack-vip.xier.local:5000 crudini --set /etc/nova/nova.conf neutron auth_type password crudini --set /etc/nova/nova.conf neutron project_domain_name default crudini --set /etc/nova/nova.conf neutron user_domain_name default crudini --set /etc/nova/nova.conf neutron region_name RegionOne crudini --set /etc/nova/nova.conf neutron project_name service crudini --set /etc/nova/nova.conf neutron username neutron crudini --set /etc/nova/nova.conf neutron password neutron crudini --set /etc/nova/nova.conf neutron service_metadata_proxy true crudini --set /etc/nova/nova.conf neutron metadata_proxy_shared_secret ang ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
同步数据库
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
启动重启相关服务
systemctl restart openstack-nova-api.service systemctl enable --now neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service
配置neutron的lvs规则代理
lvsvip1
cp /etc/keepalived/conf.d/mysql.conf /etc/keepalived/conf.d/neutron.conf sed -i "s/3306/9696/g" /etc/keepalived/conf.d/neutron.conf systemctl reload keepalived
compute1节点部署neutron
compute1安装软件
yum install openstack-neutron-linuxbridge ebtables ipset -y
配置文件
cp /etc/neutron/neutron.conf{,.bak} grep -Ev "^$|#" /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf crudini --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:openstack123@openstack-vip.xier.local crudini --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone crudini --set /etc/neutron/neutron.conf keystone_authtoken www_authenticate_uri http://openstack-vip.xier.local:5000 crudini --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://openstack-vip.xier.local:5000 crudini --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers openstack-vip.xier.local:11211 crudini --set /etc/neutron/neutron.conf keystone_authtoken auth_type password crudini --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default crudini --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default crudini --set /etc/neutron/neutron.conf keystone_authtoken project_name service crudini --set /etc/neutron/neutron.conf keystone_authtoken username neutron crudini --set /etc/neutron/neutron.conf keystone_authtoken password neutron crudini --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak} grep -Ev "^$|#" /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings extnet:eth0 crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan true crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip 10.0.0.14 crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population true crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
cat >> /etc/sysctl.conf << EOF net.bridge.bridge-nf-call-iptables = 1 net.bridge.bridge-nf-call-ip6tables = 1 EOF modprobe br_netfilter sysctl -p crudini --set /etc/nova/nova.conf neutron auth_url http://openstack-vip.xier.local:5000 crudini --set /etc/nova/nova.conf neutron auth_type password crudini --set /etc/nova/nova.conf neutron project_domain_name default crudini --set /etc/nova/nova.conf neutron user_domain_name default crudini --set /etc/nova/nova.conf neutron region_name RegionOne crudini --set /etc/nova/nova.conf neutron project_name service crudini --set /etc/nova/nova.conf neutron username neutron crudini --set /etc/nova/nova.conf neutron password neutron
重启相关服务
systemctl restart openstack-nova-compute.service systemctl enable --now neutron-linuxbridge-agent.service
验证
openstack network agent list
部署dashboard
安装软件
yum install openstack-dashboard -y
配置文件
vim /etc/openstack-dashboard/local_settings # -*- coding: utf-8 -*- # ---------------------------------------------------------------------- # NOTE: The default values of the settings are defined in # openstack_dashboard/defaults.py. Previously most available settings # were listed in this example file, but it is no longer true. # For available settings, see openstack_dashboard/defaults.py and # the horizon setting reference found at # https://docs.openstack.org/horizon/latest/configuration/settings.html. # # Django related settings and HORIZON_CONFIG still exist here. # Keep in my mind that they will be revisit in upcoming releases. # ---------------------------------------------------------------------- import os from django.utils.translation import ugettext_lazy as _ from openstack_dashboard.settings import HORIZON_CONFIG DEBUG = False # This setting controls whether or not compression is enabled. Disabling # compression makes Horizon considerably slower, but makes it much easier # to debug JS and CSS changes #COMPRESS_ENABLED = not DEBUG # This setting controls whether compression happens on the fly, or offline # with `python manage.py compress` # See https://django-compressor.readthedocs.io/en/latest/usage/#offline-compression # for more information #COMPRESS_OFFLINE = not DEBUG # If horizon is running in production (DEBUG is False), set this # with the list of host/domain names that the application can serve. # For more information see: # https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts ALLOWED_HOSTS = ["*"] # Set SSL proxy settings: # Pass this header from the proxy after terminating the SSL, # and don't forget to strip it from the client's request. 
# For more information see: # https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header #SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # If Horizon is being served through SSL, then uncomment the following two # settings to better secure the cookies from security exploits #CSRF_COOKIE_SECURE = True #SESSION_COOKIE_SECURE = True # If provided, a "Report Bug" link will be displayed in the site header # which links to the value of this setting (ideally a URL containing # information on how to report issues). #HORIZON_CONFIG["bug_url"] = "http://bug-report.example.com" # Show backdrop element outside the modal, do not close the modal # after clicking on backdrop. #HORIZON_CONFIG["modal_backdrop"] = "static" # Specify a regular expression to validate user passwords. #HORIZON_CONFIG["password_validator"] = { # "regex": '.*', # "help_text": _("Your password does not meet the requirements."), #} # Turn off browser autocompletion for forms including the login form and # the database creation workflow if so desired. #HORIZON_CONFIG["password_autocomplete"] = "off" # Setting this to True will disable the reveal button for password fields, # including on the login form. #HORIZON_CONFIG["disable_password_reveal"] = False LOCAL_PATH = '/tmp' # Set custom secret key: # You can either set it to a specific value or you can let horizon generate a # default secret key that is unique on this machine, e.i. regardless of the # amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, # there may be situations where you would want to set this explicitly, e.g. # when multiple dashboard instances are distributed on different machines # (usually behind a load-balancer). Either you have to make sure that a session # gets all requests routed to the same dashboard instance or you set the same # SECRET_KEY for all of them. 
SECRET_KEY='e53a2174d8aa1ad8e169' # We recommend you use memcached for development; otherwise after every reload # of the django development server, you will have to login again. To use # memcached set CACHES to something like below. # For more information, see # https://docs.djangoproject.com/en/1.11/topics/http/sessions/. CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': 'openstack-vip.xier.local:11211', }, } # If you use ``tox -e runserver`` for developments,then configure # SESSION_ENGINE to django.contrib.sessions.backends.signed_cookies # as shown below: #SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies' SESSION_ENGINE = 'django.contrib.sessions.backends.cache' # Send email to the console by default EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # Or send them to /dev/null #EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' # Configure these for your outgoing email host #EMAIL_HOST = 'smtp.my-company.com' #EMAIL_PORT = 25 #EMAIL_HOST_USER = 'djangomail' #EMAIL_HOST_PASSWORD = 'top-secret!' OPENSTACK_HOST = "10.0.0.10" OPENSTACK_KEYSTONE_URL = "http://%s/identity/v3" % OPENSTACK_HOST OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True OPENSTACK_API_VERSIONS = { "identity": 3, "image": 2, "volume": 3, } OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default" OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user" OPENSTACK_NEUTRON_NETWORK = { 'enable_router': True, 'enable_quotas': True, 'enable_distributed_router': True, 'enable_ha_router': True, 'enable_lb': True, 'enable_firewall': True, 'enable_vpn': True, 'enable_fip_topology_check': True, } WEBROOT = "/dashboard" # The timezone of the server. This should correspond with the timezone # of your entire OpenStack installation, and hopefully be in UTC. 
TIME_ZONE = "Asia/Shanghai" # Change this patch to the appropriate list of tuples containing # a key, label and static directory containing two files: # _variables.scss and _styles.scss #AVAILABLE_THEMES = [ # ('default', 'Default', 'themes/default'), # ('material', 'Material', 'themes/material'), # ('example', 'Example', 'themes/example'), #] LOGGING = { 'version': 1, # When set to True this will disable all logging except # for loggers specified in this configuration dictionary. Note that # if nothing is specified here and disable_existing_loggers is True, # django.db.backends will still log unless it is disabled explicitly. 'disable_existing_loggers': False, # If apache2 mod_wsgi is used to deploy OpenStack dashboard # timestamp is output by mod_wsgi. If WSGI framework you use does not # output timestamp for logging, add %(asctime)s in the following # format definitions. 'formatters': { 'console': { 'format': '%(levelname)s %(name)s %(message)s' }, 'operation': { # The format of "%(message)s" is defined by # OPERATION_LOG_OPTIONS['format'] 'format': '%(message)s' }, }, 'handlers': { 'null': { 'level': 'DEBUG', 'class': 'logging.NullHandler', }, 'console': { # Set the level to "DEBUG" for verbose output logging. 
'level': 'DEBUG' if DEBUG else 'INFO', 'class': 'logging.StreamHandler', 'formatter': 'console', }, 'operation': { 'level': 'INFO', 'class': 'logging.StreamHandler', 'formatter': 'operation', }, }, 'loggers': { 'horizon': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, 'horizon.operation_log': { 'handlers': ['operation'], 'level': 'INFO', 'propagate': False, }, 'openstack_dashboard': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, 'novaclient': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, 'cinderclient': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, 'keystoneauth': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, 'keystoneclient': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, 'glanceclient': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, 'neutronclient': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, 'swiftclient': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, 'oslo_policy': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, 'openstack_auth': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, 'django': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, # Logging from django.db.backends is VERY verbose, send to null # by default. 'django.db.backends': { 'handlers': ['null'], 'propagate': False, }, 'requests': { 'handlers': ['null'], 'propagate': False, }, 'urllib3': { 'handlers': ['null'], 'propagate': False, }, 'chardet.charsetprober': { 'handlers': ['null'], 'propagate': False, }, 'iso8601': { 'handlers': ['null'], 'propagate': False, }, 'scss': { 'handlers': ['null'], 'propagate': False, }, }, } # 'direction' should not be specified for all_tcp/udp/icmp. # It is specified in the form. 
SECURITY_GROUP_RULES = { 'all_tcp': { 'name': _('All TCP'), 'ip_protocol': 'tcp', 'from_port': '1', 'to_port': '65535', }, 'all_udp': { 'name': _('All UDP'), 'ip_protocol': 'udp', 'from_port': '1', 'to_port': '65535', }, 'all_icmp': { 'name': _('All ICMP'), 'ip_protocol': 'icmp', 'from_port': '-1', 'to_port': '-1', }, 'ssh': { 'name': 'SSH', 'ip_protocol': 'tcp', 'from_port': '22', 'to_port': '22', }, 'smtp': { 'name': 'SMTP', 'ip_protocol': 'tcp', 'from_port': '25', 'to_port': '25', }, 'dns': { 'name': 'DNS', 'ip_protocol': 'tcp', 'from_port': '53', 'to_port': '53', }, 'http': { 'name': 'HTTP', 'ip_protocol': 'tcp', 'from_port': '80', 'to_port': '80', }, 'pop3': { 'name': 'POP3', 'ip_protocol': 'tcp', 'from_port': '110', 'to_port': '110', }, 'imap': { 'name': 'IMAP', 'ip_protocol': 'tcp', 'from_port': '143', 'to_port': '143', }, 'ldap': { 'name': 'LDAP', 'ip_protocol': 'tcp', 'from_port': '389', 'to_port': '389', }, 'https': { 'name': 'HTTPS', 'ip_protocol': 'tcp', 'from_port': '443', 'to_port': '443', }, 'smtps': { 'name': 'SMTPS', 'ip_protocol': 'tcp', 'from_port': '465', 'to_port': '465', }, 'imaps': { 'name': 'IMAPS', 'ip_protocol': 'tcp', 'from_port': '993', 'to_port': '993', }, 'pop3s': { 'name': 'POP3S', 'ip_protocol': 'tcp', 'from_port': '995', 'to_port': '995', }, 'ms_sql': { 'name': 'MS SQL', 'ip_protocol': 'tcp', 'from_port': '1433', 'to_port': '1433', }, 'mysql': { 'name': 'MYSQL', 'ip_protocol': 'tcp', 'from_port': '3306', 'to_port': '3306', }, 'rdp': { 'name': 'RDP', 'ip_protocol': 'tcp', 'from_port': '3389', 'to_port': '3389', }, } # Help URL can be made available for the client. To provide a help URL, edit the # following attribute to the URL of your choice. #HORIZON_CONFIG["help_url"] = "http://openstack.mycompany.org"
重启服务
systemctl restart httpd
配置dashboard的lvs代理
lvsvip1
cat dashboard.conf virtual_server 10.0.0.100 80 { delay_loop 3 lb_algo wrr lb_kind DR protocol TCP real_server 10.0.0.10 80 { weight 100 connect_timeout 1 nb_get_retry 3 delay_before_retry 1 } real_server 10.0.0.11 80 { weight 1 HTTP_GET { url { path /index.html status_code 200 } connect_timeout 1 nb_get_retry 3 delay_before_retry 1 } } } systemctl reload keepalived
部署controller2完成高可用
keystone
controller2安装openstack基本服务
dnf install -y python3-openstackclient openstack-selinux crudini dnf install openstack-keystone httpd python3-mod_wsgi python3-PyMySQL python3-memcached -y
controller1节点拷贝配置文件
cd /etc/keystone/ tar zcvf keystone-controller.tar.gz ./* scp keystone-controller.tar.gz root@10.0.0.11:/etc/keystone/keystone-controller.tar.gz
controller2节点
tar zxvf /etc/keystone/keystone-controller.tar.gz -C /etc/keystone/ # 如果有不同的改为自己的地址 grep 10.0.0.10 /etc/keystone/* -R vim /etc/httpd/conf/httpd.conf ServerName 172.31.7.102:80 ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
启动服务
systemctl enable --now httpd.service
glance
安装服务
dnf install openstack-glance -y
controller1拷贝配置文件
cd /etc/glance/ tar cvf glance-controller1.tar.gz ./* scp glance-controller1.tar.gz root@10.0.0.11:/etc/glance/
controller2节点
tar xvf /etc/glance/glance-controller1.tar.gz -C /etc/glance/ grep 10.0.0.10 /etc/glance/* -R
启动服务
systemctl enable --now openstack-glance-api.service
placement
安装软件
yum install openstack-placement-api -y
controller1拷贝配置文件
cd /etc/placement/ tar cvf placement-controller1.tar.gz ./* scp placement-controller1.tar.gz root@10.0.0.11:/etc/placement/
controller2节点
tar xvf /etc/placement/placement-controller1.tar.gz -C /etc/placement/ grep 10.0.0.10 /etc/placement/* -R cat >> /etc/httpd/conf.d/00-placement-api.conf << EOF <Directory /usr/bin> <IfVersion >= 2.4> Require all granted </IfVersion> <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> </Directory> EOF
启动服务
systemctl restart httpd
nova
安装软件
dnf install -y openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler
controller1拷贝配置文件
cd /etc/nova/ tar cvf nova-controller.tar.gz ./* scp nova-controller.tar.gz root@10.0.0.11:/etc/nova/
controller2节点
tar xvf /etc/nova/nova-controller.tar.gz -C /etc/nova/ grep 10.0.0.10 /etc/nova/* -R sed -i "s/10.0.0.10/10.0.0.11/g" /etc/nova/nova.conf
启动服务
systemctl enable --now openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
验证
nova service-list
neutron
安装软件
dnf install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
controller1拷贝配置文件
cd /etc/neutron/ tar cvf neutron-controller.tar.gz ./* scp neutron-controller.tar.gz root@10.0.0.11:/etc/neutron/
controller2节点
tar xvf /etc/neutron/neutron-controller.tar.gz -C /etc/neutron/ grep 10.0.0.10 /etc/neutron/* -R cat >> /etc/sysctl.conf << EOF net.ipv4.ip_forward = 1 net.bridge.bridge-nf-call-iptables = 1 net.bridge.bridge-nf-call-ip6tables = 1 EOF
启动服务
systemctl enable --now neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service sysctl -p
验证
neutron agent-list
dashboard
安装软件
yum install openstack-dashboard -y
controller1拷贝配置文件
cd /etc/openstack-dashboard/ tar cvf dashboard-controller.tar.gz ./* scp dashboard-controller.tar.gz root@10.0.0.11:/etc/openstack-dashboard/
controller2节点
tar xvf /etc/openstack-dashboard/dashboard-controller.tar.gz -C /etc/openstack-dashboard/ grep 10.0.0.10 /etc/openstack-dashboard/* -R sed -i "s/10.0.0.10/10.0.0.11/g" /etc/openstack-dashboard/local_settings
启动服务
systemctl restart httpd
快速添加计算节点
nova与neutron
在compute1准备好配置文件
cd /etc/nova/ tar cvf nova-compute.tar.gz ./* cd /etc/neutron/ tar cvf neutron-compute.tar.gz ./* scp /etc/nova/nova-compute.tar.gz root@10.0.0.15:/root scp /etc/neutron/neutron-compute.tar.gz root@10.0.0.15:/root
compute2编写脚本
# Write the compute-node provisioning script, then run it.
# FIX: the heredoc delimiter is quoted ('EOF') so that ${IP}, awk's $2 and the
# command substitution are written into the script literally instead of being
# expanded by the outer shell at file-creation time (the unquoted form
# silently produced a broken script). Also use '>' instead of '>>' so a rerun
# does not append a duplicate copy.
cat > /root/compute-install.sh << 'EOF'
#!/bin/bash
# Add this host to OpenStack as a compute node.
# Place nova-compute.tar.gz and neutron-compute.tar.gz in /root first.
IP=$(ifconfig | awk 'NR==2' | awk '{print $2}')

# Base repositories and tools.
echo "开始安装源....."
sleep 2
dnf install -y centos-release-openstack-victoria
echo "源安装完成,正在安装基本软件....."
sleep 2
dnf install -y crudini wget
echo "正在配置华为源....."
sleep 2
wget -O /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-8-reg.repo

# Install and configure the nova-compute service.
echo "正在配置nova服务...."
sleep 2
echo "安装nova服务"
sleep 2
dnf install -y openstack-nova-compute
echo "解压nova配置文件中....."
sleep 2
tar xvf /root/nova-compute.tar.gz -C /etc/nova/
echo "替换配置文件对应本节点....."
sleep 2
sed -i "s/10.0.0.14/${IP}/g" /etc/nova/nova.conf
echo "启动nova服务中....."
sleep 2
systemctl enable --now libvirtd.service openstack-nova-compute.service

# Install and configure the neutron linuxbridge agent.
echo "正在配置neutron服务....."
sleep 2
echo "安装neutron服务"
sleep 2
yum install openstack-neutron-linuxbridge ebtables ipset -y
echo "解压neutron配置文件中....."
sleep 2
tar xvf /root/neutron-compute.tar.gz -C /etc/neutron/
echo "正在更改配置文件....."
sleep 2
sed -i "s/10.0.0.14/${IP}/g" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
echo "启动neutron中....."
sleep 2
systemctl restart openstack-nova-compute.service
systemctl enable --now neutron-linuxbridge-agent.service
sleep 1
echo "本计算节点已成功添加到OpenStack中"
EOF
sh /root/compute-install.sh
手机端演示
openstack优化配置
配置虚拟机自启动(计算节点)
# 计算节点 /etc/nova/nova.conf 进行以下配置: [DEFAULT] resume_guests_state_on_host_boot=true
配置CPU超限使用(计算节点)
# 默认为16,即允许开启16倍于物理cpu的虚拟cpu个数 [DEFAULT] cpu_allocation_ratio=16
配置内存超限使用(计算节点)
# 配置允许1.5倍于物理内存的虚拟内存 [DEFAULT] ram_allocation_ratio=1.5
配置保留磁盘空间(计算节点)
# 即会预留一部分磁盘空间给系统使用 [DEFAULT] reserved_host_disk_mb=20480
配置预留内存给系统使用(计算节点)
# 即预留一定的内存给系统使用 [DEFAULT] reserved_host_memory_mb=4096
配置虚拟机类型动态调整(计算节点)
# 有些时候,创建完成的虚拟机因业务需求需要变更内存或cpu或磁盘,因此需要配置允许后期类型调整 [DEFAULT] allow_resize_to_same_host=true # 允许调整
openstack跨主机迁移/类型调整(控制节点对计算节点做免密)
usermod nova -s /bin/bash echo 123456 | passwd --stdin nova su - nova ssh-keygen # 一直回车即可 ssh-copy-id nova@10.0.0.14 ssh-copy-id nova@10.0.0.15
OpenStack-API
控制节点安装python3环境和相关模块
# 安装python3
yum install -y python3
# 安装API需要的模块
pip3 install requests -i https://pypi.douban.com/simple
mkdir openstack-api
cd openstack-api
动态获取token
所有API资源创建都需要通过token认证
vim auth.py
#!/usr/bin/python3
"""Obtain a Keystone v3 token for the other OpenStack API scripts.

Every sibling script imports :func:`get_token`; Keystone returns the token
in the ``X-Subject-Token`` response header.
"""
import json

import requests

# The auth request body is JSON.
headers = {}
headers["Content-Type"] = "application/json"

# Keystone v3 token endpoint (fill in the controller/VIP address).
os_auth_url = 'http://10.0.0.100:5000/v3/auth/tokens'

# Password-scoped authentication payload.
body = {
    "auth": {
        "identity": {
            "methods": ["password"],  # authenticate by password
            "password": {
                "user": {
                    # user id from `openstack user list` on the controller
                    "id": "b09096c15b814053bf25264a3dbe5366",
                    "password": "000000"
                }
            }
        },
        "scope": {
            "project": {
                # project id from `openstack project list`
                "id": "93ad92c3e318431099327595bf689acc"
            }
        }
    }
}


def get_token():
    """Return a freshly issued token string.

    Raises ``requests.HTTPError`` on authentication failure instead of the
    original's opaque ``KeyError`` for the missing response header (the
    original also misspelled the variable as ``reponse``).
    """
    response = requests.post(os_auth_url, data=json.dumps(body), headers=headers)
    response.raise_for_status()  # fail loudly on bad credentials/endpoint
    return response.headers["X-Subject-Token"]
创建flavor(云主机类型)
vim flavor.py
#!/usr/bin/python3
"""Create (or recreate) the ``C2_2G_20G`` flavor through the Nova API."""
import json

import requests

from auth import get_token

headers = {}
headers['X-Auth-Token'] = get_token()
url = "http://10.0.0.100:8774/v2.1/flavors"

# Desired flavor: 2 vCPU / 2 GiB RAM / 20 GiB disk.
body = {
    "flavor": {
        "name": "C2_2G_20G",
        "vcpus": 2,
        "ram": 2048,
        "disk": 20,
    }
}


def create_flavor():
    """POST the flavor definition and print Nova's reply."""
    re = requests.post(url, data=json.dumps(body), headers=headers).json()
    print(re)


def main_flavor():
    """Delete any same-named flavor, then create it exactly once.

    Fix: the original called ``create_flavor()`` in the loop's ``else``
    branch, creating one duplicate per *unrelated* flavor, and never
    recreated the flavor after deleting a same-named one.
    """
    flavors = requests.get(url, headers=headers).json()['flavors']
    for flavor_name in flavors:
        print(flavor_name["name"])
        if flavor_name["name"] == body["flavor"]["name"]:
            requests.delete(url + f"/{flavor_name['id']}", headers=headers)
            print('....已删除同名云主机类型...正在新建中')
    create_flavor()
    print('云主机类型创建成功...')


if __name__ == '__main__':
    main_flavor()
创建network(网络)
vxlan网络
vim network_vxlan.py
#!/usr/bin/python3
"""Create a shared internal VXLAN network plus its subnet via Neutron."""
import json

import requests

from auth import get_token


class networks_vxlan:
    """(Re)creates the ``intnet_vxlan`` tenant network and its subnet."""

    headers = {}
    headers['X-Auth-Token'] = get_token()
    headers['Content-Type'] = 'application/json'
    url = "http://10.0.0.100:9696/v2.0/"
    # Tenant (internal) network: VXLAN overlay, shared, not external.
    body = {
        "network": {
            "name": "intnet_vxlan",
            'provider:network_type': 'vxlan',
            'router:external': False,
            'shared': True,
        }
    }

    def intnet_vxlan(self):
        """POST the network definition and print Neutron's reply."""
        re = requests.post(self.url + "networks", data=json.dumps(self.body),
                           headers=self.headers).json()
        print(re)

    def intsubnet_vxlan(self):
        """Create the 166.66.66.0/24 subnet on the freshly created network."""
        nets = requests.get(self.url + "networks", headers=self.headers).json()["networks"]
        for net in nets:
            if net['name'] == self.body['network']['name']:
                subnet_body = {
                    "subnet": {
                        "name": "intsubnet-vxlan",
                        'network_id': net['id'],
                        'cidr': '166.66.66.0/24',
                        'gateway_ip': '166.66.66.1',
                        'ip_version': 4,
                        'dns_nameservers': ['223.6.6.6'],
                    }
                }
                re = requests.post(self.url + "subnets", data=json.dumps(subnet_body),
                                   headers=self.headers).json()
                print(re)

    def main_network(self):
        """Delete any same-named network, then create network + subnet once.

        Fix: the original created one network per *unrelated* network in the
        listing and never recreated after deleting a same-named one.
        """
        nets = requests.get(self.url + "networks", headers=self.headers).json()["networks"]
        for net in nets:
            print(net['name'])
            if net['name'] == self.body['network']['name']:
                requests.delete(self.url + f"networks/{net['id']}", headers=self.headers)
                print('删除同名网络....新建网络中...')
        self.intnet_vxlan()
        self.intsubnet_vxlan()
        print('新建网络成功')


if __name__ == '__main__':
    net = networks_vxlan()
    net.main_network()
flat网络
vim network_flat.py
#!/usr/bin/python3
"""Create the external flat (provider) network plus its subnet via Neutron."""
import json

import requests

from auth import get_token


class networks_flat:
    """(Re)creates the ``extnet_flat`` external network and its subnet."""

    headers = {}
    headers['X-Auth-Token'] = get_token()
    headers['Content-Type'] = 'application/json'
    url = "http://10.0.0.100:9696/v2.0/"
    # External provider network mapped onto the physical flat network.
    # NOTE(review): 'extnal' must match the linuxbridge
    # physical_interface_mappings name, which is configured as 'extner'
    # elsewhere in this guide -- confirm which spelling is actually deployed.
    body = {
        "network": {
            "name": "extnet_flat",
            'provider:network_type': 'flat',
            'provider:physical_network': 'extnal',
            'router:external': True,
            'shared': False,
        }
    }

    def extnet_flat(self):
        """POST the network definition and print Neutron's reply."""
        re = requests.post(self.url + "networks", data=json.dumps(self.body),
                           headers=self.headers).json()
        print(re)

    def extsubnet_flat(self):
        """Create the 10.0.0.0/24 subnet with a floating-IP allocation pool."""
        nets = requests.get(self.url + "networks", headers=self.headers).json()["networks"]
        for net in nets:
            if net['name'] == self.body['network']['name']:
                subnet_body = {
                    "subnet": {
                        "name": "extsubnet_flat",
                        'network_id': net['id'],
                        'cidr': '10.0.0.0/24',
                        'gateway_ip': '10.0.0.254',
                        'ip_version': 4,
                        'dns_nameservers': ['223.6.6.6'],
                        # Range Neutron may hand out as floating IPs.
                        'allocation_pools': [
                            {
                                'start': '10.0.0.30',
                                'end': '10.0.0.240'
                            }
                        ]
                    },
                }
                re = requests.post(self.url + "subnets", data=json.dumps(subnet_body),
                                   headers=self.headers).json()
                print(re)

    def main_network(self):
        """Delete any same-named network, then create network + subnet once.

        Fix: the original created one network per *unrelated* network in the
        listing and never recreated after deleting a same-named one.
        """
        nets = requests.get(self.url + "networks", headers=self.headers).json()["networks"]
        for net in nets:
            print(net['name'])
            if net['name'] == self.body['network']['name']:
                requests.delete(self.url + f"networks/{net['id']}", headers=self.headers)
                print('删除同名网络....新建网络中...')
        self.extnet_flat()
        self.extsubnet_flat()
        print('新建网络成功')


if __name__ == '__main__':
    net = networks_flat()
    net.main_network()
创建router(路由器)
vim router.py
#!/usr/bin/python3
"""Create a router whose external gateway sits on the flat provider network."""
import json

import requests

from auth import get_token
from network_flat import networks_flat


class router:
    """(Re)creates the ``ext_router`` router with SNAT enabled."""

    headers = {}
    headers['X-Auth-Token'] = get_token()
    url = "http://10.0.0.100:9696/v2.0/"

    # Look up the external flat network created by network_flat.py; the
    # gateway port is allocated from its first subnet. If that network does
    # not exist yet, ext_flat/sub_flat stay undefined and ``body`` below
    # raises NameError -- run network_flat.py first.
    re = requests.get(url + "networks", headers=headers).json()
    name = re['networks']
    for net_id in name:
        if net_id['name'] == networks_flat.body['network']['name']:
            ext_flat = net_id['id']
            sub_flat = net_id['subnets'][0]

    body = {
        "router": {
            "name": "ext_router",
            'external_gateway_info': {
                'network_id': ext_flat,
                'enable_snat': True,  # NAT tenant traffic out through the gateway
                'external_fixed_ips': [
                    {'subnet_id': sub_flat}
                ]
            },
        }
    }

    def create_router(self):
        """POST the router definition and print Neutron's reply."""
        re = requests.post(self.url + "routers", data=json.dumps(self.body),
                           headers=self.headers).json()
        print(re)

    def main_router(self):
        """Delete any same-named router, then create it exactly once.

        Fixes: the original created one router per *unrelated* router in the
        listing, never recreated after deleting a same-named one, and created
        nothing at all when the router list was empty (fresh deployment).
        """
        routers = requests.get(self.url + "routers", headers=self.headers).json()["routers"]
        for router_id in routers:
            if router_id['name'] == self.body['router']['name']:
                requests.delete(self.url + f"routers/{router_id['id']}", headers=self.headers)
                print("删除同名路由器...正在新建中...")
        self.create_router()
        print("路由新建成功...")


if __name__ == '__main__':
    r = router()
    r.main_router()
上传镜像
vim image_push.py
#!/usr/bin/python3
"""Upload the cirros qcow2 image to Glance (create record, then PUT the data)."""
import json

import requests

from auth import get_token

# Base headers carry only the token. Fix: the original set
# 'application/octet-stream' globally, which was also sent on the JSON
# image-create POST; Glance expects JSON there, so the content type is now
# chosen per request.
headers = {}
headers['X-Auth-Token'] = get_token()
url = "http://10.0.0.100:9292"

# Image record metadata for the Glance v2 create call.
body = {
    "name": "cirros",
    'container_format': 'bare',
    'disk_format': 'qcow2',
}


def create_image():
    """Create the image record, then upload the local qcow2 file."""
    json_headers = dict(headers, **{'Content-Type': 'application/json'})
    re = requests.post(url + "/v2/images", data=json.dumps(body), headers=json_headers).json()
    print(re)
    # re["file"] is the upload path (/v2/images/<id>/file) returned by Glance.
    data_headers = dict(headers, **{'Content-Type': 'application/octet-stream'})
    # ``with`` closes the file handle the original leaked; the local path is
    # kept byte-for-byte (a Windows path -- adjust to where your image lives).
    with open("D:\iso镜像\cirros-0.4.0-x86_64-disk.img", "rb") as img:
        status = requests.put(url + re["file"], data=img, headers=data_headers).status_code
    print(status)


def main_image():
    """Delete any same-named image, then create/upload exactly once.

    Fix: the original uploaded once per *unrelated* image in the listing and
    never re-uploaded after deleting a same-named image.
    """
    images = requests.get(url + "/v2/images", headers=headers).json()['images']
    for image_id in images:
        if image_id['name'] == body['name']:
            requests.delete(url + f"/v2/images/{image_id['id']}", headers=headers)
            print("删除同名镜像成功.....正在新建中")
    create_image()
    print('镜像上传成功')


if __name__ == '__main__':
    main_image()
创建云主机
vim create_server.py
#!/usr/bin/python3
"""Boot a VM (vm01) from the cirros image on the VXLAN tenant network."""
import json

import requests

import flavor
# Fix: the upload script is saved as image_push.py earlier in this guide;
# the original imported a non-existent ``push_image`` module.
# NOTE(review): if your file is actually named push_image.py, rename it or
# revert this import.
import image_push
from network_vxlan import networks_vxlan
from auth import get_token

headers = {}
headers['X-Auth-Token'] = get_token()
url = "http://10.0.0.100:8774/v2.1/servers"

# Resolve image / flavor / network names to IDs. If any lookup finds no
# match, the corresponding variable stays undefined and ``body`` raises
# NameError -- run the image, flavor and network scripts first.
image_ret = requests.get("http://10.0.0.100:9292/v2/images", headers=headers).json()["images"]
for image_json in image_ret:
    if image_json["name"] == image_push.body["name"]:
        image_id = image_json["id"]

flavor_ret = requests.get("http://10.0.0.100:8774/v2.1/flavors", headers=headers).json()["flavors"]
for flavor_json in flavor_ret:
    if flavor_json["name"] == flavor.body["flavor"]["name"]:
        flavor_id = flavor_json['id']

network_ret = requests.get("http://10.0.0.100:9696/v2.0/networks/", headers=headers).json()["networks"]
for network_json in network_ret:
    if network_json["name"] == networks_vxlan.body["network"]["name"]:
        network_id = network_json["id"]

body = {
    "server": {
        "name": "vm01",
        "imageRef": image_id,
        "flavorRef": flavor_id,
        "networks": [{"uuid": network_id}]  # openstack network list
    }
}


def create_server():
    """POST the server definition to Nova."""
    requests.post(url, data=json.dumps(body), headers=headers).json()


def main_server():
    """Delete any same-named server, then boot it exactly once.

    Fix: the original booted one server per *unrelated* entry in the server
    list and never rebooted after deleting a same-named server.
    """
    servers = requests.get(url, headers=headers).json()['servers']
    for server_id in servers:
        if server_id['name'] == body['server']['name']:
            requests.delete(url + f"/{server_id['id']}", headers=headers)
            print("删除同名云主机....正在新建中...")
    create_server()
    print("云主机创建成功")


if __name__ == '__main__':
    main_server()
绑定浮动IP
vim float_ip.py
#!/usr/bin/python3
"""Associate a floating IP from the external flat network with vm01's port."""
import json

import requests

from auth import get_token
import create_server
from network_flat import networks_flat

headers = {}
headers["X-Auth-Token"] = get_token()
url_float = 'http://10.0.0.100:9696/v2.0/floatingips'
url_server = "http://10.0.0.100:8774/v2.1/servers"
url_extnet = "http://10.0.0.100:9696/v2.0/networks"

# Resolve the server name to its ID. If any of these lookups finds no
# match, the variable stays undefined and create_ip() raises NameError --
# run the earlier scripts first.
server_name = requests.get(url_server, headers=headers).json()
for server_json in server_name["servers"]:
    if server_json["name"] == create_server.body["server"]["name"]:
        server_id = server_json["id"]

# Find the Neutron port attached to that server (port.device_id == server id).
float_name = requests.get("http://10.0.0.100:9696/v2.0/ports", headers=headers).json()
for float_json in float_name["ports"]:
    if float_json["device_id"] == server_id:
        port_id = float_json["id"]

# Resolve the external network's ID (the floating-IP pool).
network_name = requests.get(url_extnet, headers=headers).json()
for extnet_json in network_name["networks"]:
    if extnet_json["name"] == networks_flat.body["network"]["name"]:
        extnet_id = extnet_json["id"]


def create_ip():
    """Bind a floating IP to the VM's port, or report the existing binding.

    Fixes: the original returned from inside the loop on the first
    *unrelated* floating IP (allocating a new IP even when one was already
    bound), reported the wrong entry's address after creation, and silently
    returned None when no floating IPs existed yet.
    """
    body = {
        "floatingip": {
            "floating_network_id": extnet_id,  # 外部网络ID(创建云主机时要内部网络,要把内部网络添加到路由中)
            "port_id": port_id,  # 云主机port的ID (openstack port list)
        }
    }
    float_ret = requests.get(url_float, headers=headers).json()
    for float_data in float_ret["floatingips"]:
        if float_data["port_id"] == port_id:
            return f"浮动IP已绑定,浮动地址为{float_data['floating_ip_address']}"
    created = requests.post(url_float, data=json.dumps(body), headers=headers).json()
    return f"浮动IP绑定成功,浮动地址为{created['floatingip']['floating_ip_address']}"


if __name__ == '__main__':
    ret = create_ip()
    print(ret)
创建安全组
vim security.py
#!/usr/bin/python3
"""Create a security group and a set of TCP port rules via Neutron."""
import json

import requests

from auth import get_token

headers = {}
headers["X-Auth-Token"] = get_token()
url = "http://10.0.0.100:9696/v2.0/security-groups/"

# Security-group metadata for the create call.
body1 = {
    "security_group": {
        "name": "sec_group",
    }
}

# Snapshot of the existing groups, taken once at import time.
sec_body = requests.get(url, headers=headers).json()
sec_ret = sec_body["security_groups"]


def security_group_create():
    """Delete any same-named security group, create a fresh one, return its JSON.

    Fixes: the original returned None whenever a same-named group was
    deleted (crashing the ``["security_group"]["id"]`` lookup below) and
    created one duplicate group per *unrelated* group in the listing.
    """
    for sec_json in sec_ret:
        if sec_json["name"] == body1["security_group"]["name"]:
            requests.delete(url + f"{sec_json['id']}", headers=headers)
            print("安全组删除成功....正在重新创建....")
    res = requests.post(url, data=json.dumps(body1), headers=headers).json()
    print("安全组创建成功")
    return res


sec_rules_id = security_group_create()["security_group"]["id"]


def security_group_rules_create():
    """Add one TCP rule per service port (ftp/ssh/http/mysql) to the group."""
    rules_url = "http://10.0.0.100:9696/v2.0/security-group-rules/"

    def port(max_port, min_port):
        """Build one security-group-rule request body for a TCP port range."""
        # NOTE(review): direction 'egress' and prefix '0.0.0.0/24' are kept
        # from the original; for inbound access rules 'ingress' and
        # '0.0.0.0/0' are probably intended -- confirm before relying on it.
        return {
            'security_group_rule': {
                'direction': 'egress',
                'protocol': 'tcp',
                'ethertype': 'IPv4',
                'remote_ip_prefix': "0.0.0.0/24",
                'port_range_max': max_port,
                'port_range_min': min_port,
                'security_group_id': sec_rules_id,
            }
        }

    rule_bodies = [port(20, 20), port(21, 21), port(22, 22), port(80, 80), port(3306, 3306)]
    results = []
    for rule in rule_bodies:
        results.append(requests.post(rules_url, data=json.dumps(rule), headers=headers).json())


if __name__ == '__main__':
    security_group_rules_create()
【版权声明】本文为华为云社区用户原创内容,未经允许不得转载,如需转载请自行联系原作者进行授权。如果您发现本社区中有涉嫌抄袭的内容,欢迎发送邮件进行举报,并提供相关证据,一经查实,本社区将立刻删除涉嫌侵权内容,举报邮箱:
cloudbbs@huaweicloud.com
- 点赞
- 收藏
- 关注作者
评论(0)