1. System initialization
init.sh
#!/usr/bin/bash
echo "Disabling the firewall: firewalld"
systemctl disable firewalld && systemctl stop firewalld
echo "Disabling SELinux"
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
echo "Setting the hostname"
read -p "Enter hostname ($(hostname)): " hostname
if [[ $hostname ]]; then
    hostnamectl set-hostname "$hostname"
fi
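The scripts below reach the other machines by hostname (`controller`, the compute node, the storage node), so every node also needs name resolution. A minimal sketch using /etc/hosts, assuming the controller and compute addresses used later in this guide (192.100.5.137 and 192.100.5.138); the hostname `compute1` is only an example, adjust names and IPs to your environment:

```bash
# Append static name-resolution entries on every node (IPs/hostnames are examples)
cat >> /etc/hosts <<-EOF
192.100.5.137  controller
192.100.5.138  compute1
EOF
```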
2. Pre-installation preparation
2.1 Configure the yum repositories in advance
2.2 Install the time synchronization service
2.2.1 Controller node
chrony-install-controller.sh
#!/usr/bin/bash
echo "Installing chrony"
yum install chrony -y
# Comment out the default time servers; this node serves time itself
echo "Editing the configuration file /etc/chrony.conf"
sed -ri 's/(^server.*)/#\1/' /etc/chrony.conf
# Find the last commented "server" line and append "server controller iburst" after it
endrow=$(sed -rn '/^#server.*/=' /etc/chrony.conf | sed -n '$p')
sed -ri "${endrow}a server controller iburst" /etc/chrony.conf
# Allow the other nodes to synchronize from this server (clients do not need this line)
sed -ri '/^#allow.*/a allow 192.100.0.0/16' /etc/chrony.conf
# (clients do not need this line)
sed -ri '/^#local.*/a local stratum 10' /etc/chrony.conf
# Start
systemctl enable chronyd.service && systemctl restart chronyd.service
# Manual synchronization
#ntpdate controller
# Verify
echo "Verifying"
chronyc sources
# Configure the OpenStack repository
#yum install centos-release-openstack-train -y && yum upgrade -y
yum install openstack-selinux -y
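Before moving on, it is worth confirming the controller is actually serving time. A quick check with standard chronyc subcommands (nothing here beyond the configuration written above):

```bash
chronyc sources -v   # list upstream sources and their sync state
chronyc tracking     # show the current offset and stratum of this node
ss -lunp | grep 123  # chronyd should be listening on UDP port 123
```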
2.2.2 Other nodes
chrony-install-other.sh
#!/usr/bin/bash
echo "Installing chrony"
yum install chrony -y
# Comment out the default time servers and synchronize against the controller instead
sed -ri 's/(^server.*)/#\1/' /etc/chrony.conf
# Find the last commented "server" line and append "server controller iburst" after it
endrow=$(sed -rn '/^#server.*/=' /etc/chrony.conf | sed -n '$p')
sed -ri "${endrow}a server controller iburst" /etc/chrony.conf
# Start
systemctl enable chronyd.service && systemctl restart chronyd.service
# Verify
echo "Verifying"
chronyc sources
# Configure the OpenStack repository
#yum install centos-release-openstack-train -y && yum upgrade -y
yum install openstack-selinux -y
3. Install the base components: MariaDB, RabbitMQ, Memcached
mariadb-rabbitmq-memcached-install.sh
#!/bin/bash
echo "================Installing the SQL database: MariaDB=================="
yum install mariadb mariadb-server python2-PyMySQL -y
cp /etc/my.cnf /etc/my.cnf.bak
sed -ri '/^\[mysqld\]/a bind-address = 0.0.0.0\ndefault-storage-engine = innodb\ninnodb_file_per_table = on\nmax_connections = 4096\ncollation-server = utf8_general_ci\ncharacter-set-server = utf8' /etc/my.cnf
echo "================Starting the MariaDB service=================="
systemctl enable mariadb.service && systemctl restart mariadb.service && systemctl status mariadb.service
echo "================Installing RabbitMQ=================="
yum install rabbitmq-server -y
systemctl enable rabbitmq-server.service && systemctl start rabbitmq-server.service && systemctl status rabbitmq-server.service
rabbitmqctl add_user openstack openstack
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
rabbitmqctl status
echo "================Installing Memcached=================="
yum install memcached python-memcached -y
sed -ri 's/-l 127.0.0.1,::1/-l 127.0.0.1,::1,controller/g' /etc/sysconfig/memcached
systemctl enable memcached.service && systemctl start memcached.service
systemctl status memcached
echo "================Run mysql_secure_installation to secure the database=================="
After installation, run mysql_secure_installation to secure the database.
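If you prefer to script that step instead of answering the interactive prompts, the same hardening can be done with plain SQL. A sketch only: it assumes a fresh MariaDB installation where root still has no password, and it sets the root password to `123456` because that is what the later scripts use; adjust to your own policy:

```bash
# Non-interactive equivalent of mysql_secure_installation (assumptions noted above)
mysql -uroot <<-EOF
SET PASSWORD FOR 'root'@'localhost' = PASSWORD('123456');
DELETE FROM mysql.user WHERE User='';
DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost','127.0.0.1','::1');
DROP DATABASE IF EXISTS test;
DELETE FROM mysql.db WHERE Db='test';
FLUSH PRIVILEGES;
EOF
```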
4. Glance installation
glance-install-controller.sh
#!/bin/bash
# Create the glance database
mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS glance;GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';"
mysql -uroot -p123456 -e "show databases;"
# Create the user, service and endpoints
# Load admin credentials
source /root/admin-openrc.sh
echo "Creating the glance user"
openstack user create --domain default --password glance glance
echo "Adding the admin role to the glance user"
openstack role add --project service --user glance admin
echo "Creating the glance image service"
openstack service create --name glance --description "OpenStack Image" image
echo "Creating the public, internal and admin endpoints"
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
# Install
yum install openstack-glance -y
cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak
#sed -ri '/^[ \t]*(#|$)/d' /etc/glance/glance-api.conf
tee /etc/glance/glance-api.conf <<-EOF
[database]
# ...
connection = mysql+pymysql://glance:glance@controller/glance
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance
[paste_deploy]
# ...
flavor = keystone
[glance_store]
# ...
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
EOF
# Sync the database
su -s /bin/sh -c "glance-manage db_sync" glance
# Start
systemctl enable openstack-glance-api.service && systemctl restart openstack-glance-api.service
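A quick way to verify Glance end to end is to upload a small test image, as in the official install guide. The CirrOS download URL below is an assumption; any small qcow2 image on hand works just as well:

```bash
source /root/admin-openrc.sh
# Download a small test image (URL assumed; substitute your own image if needed)
curl -o /tmp/cirros-0.4.0-x86_64-disk.img http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
# Upload it and confirm the image reaches the "active" status
openstack image create "cirros" --file /tmp/cirros-0.4.0-x86_64-disk.img \
  --disk-format qcow2 --container-format bare --public
openstack image list
```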
5. Placement installation
placement-install-controller.sh
#!/bin/bash
# Create the placement database
mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS placement;GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'placement';GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'placement';"
mysql -uroot -p123456 -e "show databases;"
# Create the user, service and endpoints
# Load admin credentials
source /root/admin-openrc.sh
echo "Creating the placement user"
openstack user create --domain default --password placement placement
echo "Adding the admin role"
openstack role add --project service --user placement admin
echo "Creating the placement service"
openstack service create --name placement --description "Placement API" placement
echo "Creating the endpoints"
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
# Install
echo "Installing openstack-placement-api"
yum install openstack-placement-api -y
cp /etc/placement/placement.conf /etc/placement/placement.conf.bak
echo "Writing the configuration file"
tee /etc/placement/placement.conf <<-EOF
[placement_database]
# ...
connection = mysql+pymysql://placement:placement@controller/placement
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = placement
EOF
# Sync the database
echo "Syncing the database"
su -s /bin/sh -c "placement-manage db sync" placement
# Restart httpd
echo "Restarting httpd"
systemctl restart httpd
echo "Verifying"
placement-status upgrade check
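On CentOS 7 the packaged 00-placement-api.conf can deny httpd access to the placement WSGI script, which later shows up as a 403 from `nova-status upgrade check`; the compute-node script below carries this fix as a commented-out block. A sketch of that commonly used workaround, applied on the controller only if you hit the 403:

```bash
# Allow httpd to execute the placement API WSGI script (workaround for 403 errors)
cat >> /etc/httpd/conf.d/00-placement-api.conf <<-EOF
<Directory /usr/bin>
  <IfVersion >= 2.4>
    Require all granted
  </IfVersion>
  <IfVersion < 2.4>
    Order allow,deny
    Allow from all
  </IfVersion>
</Directory>
EOF
systemctl restart httpd
```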
6. Nova installation
6.1 Controller node
nova-install-controller.sh
Note: **change my_ip in /etc/nova/nova.conf to this node's management address**
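To find the address to put into `my_ip` before running the script, you can read it off the management NIC. A sketch, assuming the management interface is `ens32` (the interface name used in the ML2 configuration later in this guide); adjust the NIC name to your host:

```bash
# Print the IPv4 address of the management interface
ip -4 -o addr show ens32 | awk '{print $4}' | cut -d/ -f1
```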
#!/bin/bash
echo "Creating the databases"
mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS nova_api;CREATE DATABASE IF NOT EXISTS nova;CREATE DATABASE IF NOT EXISTS nova_cell0;GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova';"
mysql -uroot -p123456 -e "show databases;"
echo "Loading admin credentials"
source /root/admin-openrc.sh
echo "Creating the nova user"
openstack user create --domain default --password nova nova
echo "Adding the admin role"
openstack role add --project service --user nova admin
echo "Creating the compute service"
openstack service create --name nova --description "OpenStack Compute" compute
echo "Creating the endpoints"
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
echo "Installing nova-api, nova-conductor, nova-novncproxy and nova-scheduler"
yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
tee /etc/nova/nova.conf <<-EOF
[DEFAULT]
# ...
enabled_apis = osapi_compute,metadata
my_ip = 192.100.5.137
transport_url = rabbit://openstack:openstack@controller:5672/
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
# ...
connection = mysql+pymysql://nova:nova@controller/nova_api
[database]
# ...
connection = mysql+pymysql://nova:nova@controller/nova
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[vnc]
enabled = true
# ...
server_listen = \$my_ip
server_proxyclient_address = \$my_ip
[glance]
# ...
api_servers = http://controller:9292
[oslo_concurrency]
# ...
lock_path = /var/lib/nova/tmp
[placement]
# ...
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
EOF
echo "Syncing the databases"
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
echo "Verifying"
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
echo "Starting the services"
systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl status openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
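After the controller services come up, a quick sanity check with the standard commands:

```bash
source /root/admin-openrc.sh
# nova-scheduler and nova-conductor should be listed and "up"
openstack compute service list
# cell0 and cell1 should both be mapped
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
```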
6.2 Compute node
nova-install-computer.sh
#!/bin/bash
echo "Installing openstack-nova-compute"
yum install openstack-nova-compute -y
if [[ ! -f /etc/nova/nova.conf.bak ]];then
    cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
fi
tee /etc/nova/nova.conf <<-EOF
[DEFAULT]
# ...
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack@controller
my_ip = 192.100.5.138
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[vnc]
# ...
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = \$my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[glance]
# ...
api_servers = http://controller:9292
[oslo_concurrency]
# ...
lock_path = /var/lib/nova/tmp
[placement]
# ...
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
EOF
echo "Checking for hardware virtualization support"
core=$(egrep -c '(vmx|svm)' /proc/cpuinfo)
if [[ $core -eq 0 ]]; then
tee -a /etc/nova/nova.conf <<-EOF
[libvirt]
virt_type = qemu
EOF
fi
echo "Starting the services"
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl restart libvirtd.service openstack-nova-compute.service
systemctl status libvirtd.service openstack-nova-compute.service
# Fix for the "nova-status upgrade check" 403 error
#tee -a /etc/httpd/conf.d/00-placement-api.conf <<-EOF
# <Directory /usr/bin>
#   <IfVersion >= 2.4>
#     Require all granted
#   </IfVersion>
#   <IfVersion < 2.4>
#     Order allow,deny
#     Allow from all
#   </IfVersion>
# </Directory>
#EOF
echo "On the controller node run: openstack compute service list --service nova-compute"
echo "On the controller node run: su -s /bin/sh -c 'nova-manage cell_v2 discover_hosts --verbose' nova"
echo "Verify: openstack compute service list"
echo "Verify: nova-status upgrade check"
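New compute nodes are invisible to the scheduler until they are discovered. Besides running `nova-manage cell_v2 discover_hosts` on the controller as the script reminds you, the controller can be told to discover hosts periodically. A sketch, to be applied on the controller (the 300-second interval is an example value):

```bash
# On the controller: discover newly added compute hosts every 5 minutes
cat >> /etc/nova/nova.conf <<-EOF
[scheduler]
discover_hosts_in_cells_interval = 300
EOF
systemctl restart openstack-nova-scheduler.service
```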
7. Neutron installation
7.1 Controller node
neutron-install-controller.sh
#!/bin/bash
echo "Creating the database"
mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS neutron;GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';show databases;"
echo "Loading admin credentials"
source /root/admin-openrc.sh
echo "Creating the neutron user"
openstack user create --domain default --password neutron neutron
echo "Adding the admin role"
openstack role add --project service --user neutron admin
echo "Creating the network service"
openstack service create --name neutron --description "OpenStack Networking" network
echo "Creating the endpoints"
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
echo "Networking option: self-service networks"
echo "Installing"
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
if [[ ! -f /etc/neutron/neutron.conf.bak ]];then
    cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
fi
tee /etc/neutron/neutron.conf <<-EOF
[database]
# ...
connection = mysql+pymysql://neutron:neutron@controller/neutron
[DEFAULT]
# ...
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[nova]
# ...
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova
[oslo_concurrency]
# ...
lock_path = /var/lib/neutron/tmp
EOF
echo "Configuring the Modular Layer 2 (ML2) plug-in"
if [[ ! -f /etc/neutron/plugins/ml2/ml2_conf.ini.bak ]];then
    cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak
fi
tee /etc/neutron/plugins/ml2/ml2_conf.ini <<-EOF
[ml2]
# ...
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
# ...
flat_networks = provider
[ml2_type_vxlan]
# ...
vni_ranges = 1:1000
[securitygroup]
# ...
enable_ipset = true
EOF
echo "Configuring the Linux bridge agent"
if [[ ! -f /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak ]];then
    cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
fi
tee /etc/neutron/plugins/ml2/linuxbridge_agent.ini <<-EOF
[linux_bridge]
physical_interface_mappings = provider:ens32
[vxlan]
enable_vxlan = true
# local_ip must be this node's management-interface IP
local_ip = MANAGEMENT_INTERFACE_IP
l2_population = true
[securitygroup]
# ...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
EOF
echo "Enabling network bridge support"
modprobe br_netfilter
echo "Ensuring net.bridge.bridge-nf-call-iptables and net.bridge.bridge-nf-call-ip6tables are set to 1"
sysctl net.bridge.bridge-nf-call-iptables=1
sysctl net.bridge.bridge-nf-call-ip6tables=1
echo "Configuring the layer-3 agent"
if [[ ! -f /etc/neutron/l3_agent.ini.bak ]];then
    cp /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.bak
fi
tee /etc/neutron/l3_agent.ini <<-EOF
[DEFAULT]
# ...
interface_driver = linuxbridge
EOF
echo "Configuring the DHCP agent"
if [[ ! -f /etc/neutron/dhcp_agent.ini.bak ]];then
    cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.bak
fi
tee /etc/neutron/dhcp_agent.ini <<-EOF
[DEFAULT]
# ...
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True
EOF
echo "Configuring the metadata agent"
if [[ ! -f /etc/neutron/metadata_agent.ini.bak ]];then
    cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.bak
fi
tee /etc/neutron/metadata_agent.ini <<-EOF
[DEFAULT]
# ...
nova_metadata_host = controller
metadata_proxy_shared_secret = METADATA_SECRET
EOF
echo "Configuring the Compute service to use the Networking service"
tee -a /etc/nova/nova.conf <<-EOF
[neutron]
# ...
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = METADATA_SECRET
EOF
echo "Syncing the database"
if [[ ! -f /etc/neutron/plugin.ini ]];then
    ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
fi
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
echo "Starting the services"
systemctl restart openstack-nova-api.service && systemctl status openstack-nova-api.service
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl status neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
echo "For networking option 2, also enable and start the layer-3 service"
systemctl enable neutron-l3-agent.service
systemctl restart neutron-l3-agent.service
systemctl status neutron-l3-agent.service
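Once the services are running, verify on the controller that the agents have registered and report as alive:

```bash
source /root/admin-openrc.sh
openstack extension list --network   # neutron API extensions should be loaded
openstack network agent list         # metadata, DHCP, linuxbridge and (if started) L3 agents should be alive
```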
7.2 Compute node
neutron-install-computer.sh
#!/bin/bash
echo "Installing"
yum install openstack-neutron-linuxbridge ebtables ipset -y
if [[ ! -f /etc/neutron/neutron.conf.bak ]];then
    cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
fi
tee /etc/neutron/neutron.conf <<-EOF
[DEFAULT]
# ...
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[oslo_concurrency]
# ...
lock_path = /var/lib/neutron/tmp
EOF
echo "Networking option: self-service networks"
if [[ ! -f /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak ]];then
    cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
fi
tee /etc/neutron/plugins/ml2/linuxbridge_agent.ini <<-EOF
[linux_bridge]
physical_interface_mappings = provider:ens33
[vxlan]
enable_vxlan = true
# local_ip must be this node's management-interface IP
local_ip = MANAGEMENT_INTERFACE_IP
l2_population = true
[securitygroup]
# ...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
EOF
echo "Enabling network bridge support"
modprobe br_netfilter
echo "Ensuring net.bridge.bridge-nf-call-iptables and net.bridge.bridge-nf-call-ip6tables are set to 1"
sysctl net.bridge.bridge-nf-call-iptables=1
sysctl net.bridge.bridge-nf-call-ip6tables=1
echo "Configuring the Compute service to use the Networking service"
tee -a /etc/nova/nova.conf <<-EOF
[neutron]
# ...
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
EOF
echo "Starting the services"
systemctl restart openstack-nova-compute.service && systemctl status openstack-nova-compute.service
systemctl enable neutron-linuxbridge-agent.service && systemctl restart neutron-linuxbridge-agent.service && systemctl status neutron-linuxbridge-agent.service
echo "Verify: openstack extension list --network"
echo "Verify the network agents (on the controller): openstack network agent list"
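Both neutron scripts set the bridge sysctls at runtime only; after a reboot `br_netfilter` may not be loaded and the values can reset. A small sketch to make them persistent on every network/compute node (the file names under /etc/modules-load.d and /etc/sysctl.d are conventional choices, not mandated by this guide):

```bash
# Load br_netfilter at boot and persist the bridge sysctls
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
cat > /etc/sysctl.d/99-neutron-bridge.conf <<-EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl --system
```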
8. Cinder installation
8.1 Controller node
cinder-install-controller.sh
#!/bin/bash
echo "Configuring the database"
mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS cinder;GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder';GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder';"
mysql -uroot -p123456 -e "show databases;"
echo "Loading admin credentials"
source /root/admin-openrc.sh
echo "Creating the cinder user"
openstack user create --domain default --password cinder cinder
echo "Adding the admin role"
openstack role add --project service --user cinder admin
echo "Creating the cinderv2 service"
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
echo "Creating the cinderv3 service"
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
echo "Creating the endpoints"
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
echo "Installing"
yum install openstack-cinder -y
if [[ ! -f /etc/cinder/cinder.conf.bak ]];then
    cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
fi
tee /etc/cinder/cinder.conf <<-EOF
[database]
# ...
connection = mysql+pymysql://cinder:cinder@controller/cinder
[DEFAULT]
# ...
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
my_ip = 192.100.5.137
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[oslo_concurrency]
# ...
lock_path = /var/lib/cinder/tmp
EOF
echo "Syncing the database"
su -s /bin/sh -c "cinder-manage db sync" cinder
echo "Configuring Compute to use Block Storage"
tee -a /etc/nova/nova.conf <<-EOF
[cinder]
os_region_name = RegionOne
EOF
echo "Starting the services"
systemctl restart openstack-nova-api.service
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service && systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl status openstack-cinder-api.service openstack-cinder-scheduler.service
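Verify that the block-storage services registered on the controller:

```bash
source /root/admin-openrc.sh
# cinder-scheduler should be "up"; cinder-volume only appears after the storage node is configured
openstack volume service list
```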
8.2 Block storage node
Prepare a dedicated disk and create the volume group before installing (a sketch of that preparation follows).
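A sketch of that preparation, assuming the spare disk is `/dev/sdb` (the device name is an assumption; adjust it, and the LVM filter, to your machine). The volume group name must match the `volume_group = cinder-volumes` setting in the script below:

```bash
# LVM tooling and the metadata daemon
yum install lvm2 device-mapper-persistent-data -y
systemctl enable lvm2-lvmetad.service && systemctl start lvm2-lvmetad.service
# Create the physical volume and the volume group the [lvm] backend expects
pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb
# Optionally restrict LVM scanning to the cinder disk (plus the OS disk if it also uses LVM)
# by adding a filter to the "devices" section of /etc/lvm/lvm.conf, for example:
#   filter = [ "a/sdb/", "r/.*/" ]
```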
cinder-install-block.sh
#!/bin/bash
echo "Installing cinder"
yum install openstack-cinder targetcli python-keystone -y
if [[ ! -f /etc/cinder/cinder.conf.bak ]];then
    cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
fi
tee /etc/cinder/cinder.conf <<-EOF
[database]
# ...
connection = mysql+pymysql://cinder:cinder@controller/cinder
[DEFAULT]
# ...
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
my_ip = 192.168.189.153
enabled_backends = lvm
glance_api_servers = http://controller:9292
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = lioadm
[oslo_concurrency]
# ...
lock_path = /var/lib/cinder/tmp
EOF
echo "Starting the services"
systemctl enable openstack-cinder-volume.service target.service && systemctl restart openstack-cinder-volume.service target.service
systemctl status openstack-cinder-volume.service target.service
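Finally, back on the controller, confirm the volume service is up and that a test volume can be created (the volume name is only an example):

```bash
source /root/admin-openrc.sh
openstack volume service list           # cinder-volume on the storage node should be "up"
openstack volume create --size 1 test-volume
openstack volume list                   # the volume should reach the "available" status
```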
