Installation sources: CentOS 7, EPEL 7, and OpenStack Kilo.
Install the MariaDB database packages:
# yum install mariadb mariadb-server MySQL-python -y
Edit the following file to complete these steps:
# vi /etc/my.cnf.d/mariadb_openstack.cnf
In the [mysqld] section, add or modify the following options:
[mysqld]
bind-address = 10.0.0.11
default-storage-engine = innodb
lower_case_table_names = 1
innodb_file_per_table
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
Start the database service and enable it at boot:
# systemctl start mariadb.service
# systemctl enable mariadb.service
Run the MySQL secure-installation wizard (this step also sets up MySQL for remote connections):
# mysql_secure_installation
Set the root password to P@ssw0rd.
# systemctl restart mariadb.service
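The wizard is interactive; the following answers are a sketch of what this setup typically needs (prompts vary slightly between MariaDB versions):
Enter current password for root: [press Enter; none is set yet]
Set root password? [Y/n] Y   [enter P@ssw0rd twice]
Remove anonymous users? [Y/n] Y
Disallow root login remotely? [Y/n] n   [answer n to keep remote root access]
Remove test database and access to it? [Y/n] Y
Reload privilege tables now? [Y/n] Y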
Install the RabbitMQ service:
# yum install rabbitmq-server -y
Start the RabbitMQ service and enable it to start at boot:
# systemctl start rabbitmq-server.service
# systemctl enable rabbitmq-server.service
# systemctl status rabbitmq-server.service
Add the openstack user:
# rabbitmqctl add_user openstack P@ssw0rd
Grant permissions:
# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
# systemctl restart rabbitmq-server.service
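To confirm that the user and permissions took effect, both of the following standard rabbitmqctl queries can be used:
# rabbitmqctl list_users
# rabbitmqctl list_permissions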
Log in to the database, using the root password set during installation:
# mysql -uroot -pP@ssw0rd
Create the keystone database and grant privileges to the keystone user (the password P@ssw0rd stands in for KEYSTONE_DBPASS):
MariaDB [(none)]> CREATE DATABASE keystone;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'P@ssw0rd';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'P@ssw0rd';
MariaDB [(none)]> exit
Generate a random value to use as the admin token in the configuration steps that follow:
# openssl rand -hex 10
fb7269c14626a5966181
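If the openstack-utils package is installed (an assumption; it provides the openstack-config helper), the generated token can be written into keystone.conf without hand-editing; a minimal sketch:
# ADMIN_TOKEN=$(openssl rand -hex 10)
# openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token $ADMIN_TOKEN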
Install the packages:
# yum install -y openstack-keystone python-keystoneclient
# vim /etc/keystone/keystone.conf
[DEFAULT]
admin_token = 11d5c31d5a96d7b42315   [use the token generated above]
verbose = true
[database]
connection = mysql://keystone:P@ssw0rd@controller/keystone
[token]
provider = keystone.token.providers.uuid.Provider
driver = keystone.token.persistence.backends.sql.Token
[revoke]
driver = keystone.contrib.revoke.backends.sql.Revoke
Create the PKI certificates and keys, and set the related file permissions:
# keystone-manage pki_setup --keystone-user keystone --keystone-group keystone
No handlers could be found for logger "oslo_config.cfg"
The following cert files already exist, use --rebuild to remove the existing files before regenerating:
/etc/keystone/ssl/private/cakey.pem already exists
/etc/keystone/ssl/certs/ca.pem already exists
/etc/keystone/ssl/private/signing_key.pem already exists
/etc/keystone/ssl/certs/signing_cert.pem already exists
# chown -R keystone:keystone /var/log/keystone
# chown -R keystone:keystone /etc/keystone/ssl
# chmod -R o-rwx /etc/keystone/ssl
Populate the Identity service database:
# su -s /bin/sh -c "keystone-manage db_sync" keystone
No handlers could be found for logger "oslo_config.cfg"
Enable and start the service:
# systemctl enable openstack-keystone.service
# systemctl start openstack-keystone.service
By default, expired tokens are never deleted and accumulate in the database indefinitely, so add a cron job to purge them hourly:
# (crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/keystone
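To confirm the cron entry was written:
# crontab -l -u keystone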
Create the admin user:
# export OS_SERVICE_TOKEN=11d5c31d5a96d7b42315
# export OS_SERVICE_ENDPOINT=http://controller:35357/v2.0
# keystone tenant-create --name admin --description "Admin Tenant"
# keystone user-create --name admin --pass P@ssw0rd --email root@localhost
# keystone role-create --name admin
# keystone user-role-add --tenant admin --user admin --role admin
Create the demo user:
# keystone tenant-create --name demo --description "Demo Tenant"
# keystone user-create --name demo --tenant demo --pass P@ssw0rd --email demo@localhost
Create the service tenant:
# keystone tenant-create --name service --description "Service Tenant"
Create the service entity:
# keystone service-create --name keystone --type identity --description "OpenStack Identity"
# keystone endpoint-create --service-id $(keystone service-list | awk '/ identity / {print $2}') --publicurl http://controller:5000/v2.0 --internalurl http://controller:5000/v2.0 --adminurl http://controller:35357/v2.0 --region regionOne
+-------------+----------------------------------+
| Property    | Value                            |
+-------------+----------------------------------+
| adminurl    | http://controller:35357/v2.0     |
| id          | 71ed01478ea34f12bfe81cc9de80ff75 |
| internalurl | http://controller:5000/v2.0      |
| publicurl   | http://controller:5000/v2.0      |
| region      | regionOne                        |
| service_id  | efa9e2e0830b4bd4a8d6470f1d1c95d4 |
+-------------+----------------------------------+
Verify the installation:
# unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
Using the admin tenant and user, request an authentication token:
# keystone --os-tenant-name admin --os-username admin --os-password P@ssw0rd --os-auth-url http://controller:35357/v2.0 token-get
Using the admin tenant and user, list tenants to verify the admin tenant and user:
# keystone --os-tenant-name admin --os-username admin --os-password P@ssw0rd --os-auth-url http://controller:35357/v2.0 tenant-list
Using the admin tenant and user, list users:
# keystone --os-tenant-name admin --os-username admin --os-password P@ssw0rd --os-auth-url http://controller:35357/v2.0 user-list
Using the admin tenant and user, list roles:
# keystone --os-tenant-name admin --os-username admin --os-password P@ssw0rd --os-auth-url http://controller:35357/v2.0 role-list
Create the client environment scripts:
# vi admin-openrc.sh
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=P@ssw0rd
export OS_AUTH_URL=http://controller:35357/v2.0
# vi demo-openrc.sh
export OS_TENANT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=P@ssw0rd
export OS_AUTH_URL=http://controller:5000/v2.0
# source admin-openrc.sh
Create the database:
# mysql -uroot -pP@ssw0rd
MariaDB [(none)]> CREATE DATABASE glance;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'P@ssw0rd';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'P@ssw0rd';
Source the admin credentials to run admin-only commands:
# source admin-openrc.sh
Create the service credentials.
Create the glance user:
# keystone user-create --name glance --pass P@ssw0rd
Add the admin role to the glance user:
# keystone user-role-add --user glance --tenant service --role admin
Create the glance service entity:
# keystone service-create --name glance --type image --description "OpenStack Image Service"
Create the Image service API endpoints:
# keystone endpoint-create --service-id $(keystone service-list | awk '/ image / {print $2}') --publicurl http://controller:9292 --internalurl http://controller:9292 --adminurl http://controller:9292 --region regionOne
Install the packages:
# yum install openstack-glance python-glanceclient
Edit the configuration file: vim /etc/glance/glance-api.conf
[DEFAULT]
verbose = True
notification_driver = noop
[database]
connection = mysql://glance:P@ssw0rd@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000/v2.0
identity_uri = http://controller:35357
admin_tenant_name = service
admin_user = glance
admin_password = P@ssw0rd
[paste_deploy]
flavor = keystone
[glance_store]
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
Note: comment out all auth_host, auth_port, and auth_protocol options, since identity_uri already covers them.
Edit vim /etc/glance/glance-registry.conf:
[DEFAULT]
verbose = True
notification_driver = noop
[database]
connection = mysql://glance:P@ssw0rd@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000/v2.0
identity_uri = http://controller:35357
admin_tenant_name = service
admin_user = glance
admin_password = P@ssw0rd
[paste_deploy]
flavor = keystone
Populate the Image service database:
# su -s /bin/sh -c "glance-manage db_sync" glance
Start the Image services and configure them to start at boot:
# systemctl enable openstack-glance-api.service openstack-glance-registry.service
# systemctl start openstack-glance-api.service openstack-glance-registry.service
# mkdir /tmp/images
Download cirros-0.3.0-x86_64-disk.img and copy it into that directory.
# source admin-openrc.sh
# glance image-create --name "cirros-0.3.0-x86_64" --file /tmp/images/cirros-0.3.0-x86_64-disk.img --disk-format qcow2 --container-format bare --is-public True --progress
# glance image-list
Create the database:
# mysql -u root -p
MariaDB [(none)]> CREATE DATABASE nova;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'P@ssw0rd';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'P@ssw0rd';
Source the admin credentials to run admin-only commands:
# source admin-openrc.sh
Create the service credentials.
Create the nova user:
# keystone user-create --name nova --pass P@ssw0rd
# keystone user-role-add --user nova --tenant service --role admin
# keystone service-create --name nova --type compute --description "OpenStack Compute"
# keystone endpoint-create --service-id $(keystone service-list | awk '/ compute / {print $2}') --publicurl http://controller:8774/v2/%\(tenant_id\)s --internalurl http://controller:8774/v2/%\(tenant_id\)s --adminurl http://controller:8774/v2/%\(tenant_id\)s --region regionOne
Install the packages:
# yum install openstack-nova-api openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler python-novaclient
Edit the configuration file: vim /etc/nova/nova.conf
[DEFAULT]
rpc_backend = rabbit
my_ip = 10.0.0.11
rabbit_host = controller
rabbit_password = P@ssw0rd
auth_strategy = keystone
vncserver_listen = 10.0.0.11
vncserver_proxyclient_address = 10.0.0.11
verbose = True
[database]
connection = mysql://nova:P@ssw0rd@controller/nova
[keystone_authtoken]
auth_uri = http://controller:5000/v2.0
identity_uri = http://controller:35357
admin_tenant_name = service
admin_user = nova
admin_password = P@ssw0rd
[glance]
host = controller
Sync the Compute database:
# su -s /bin/sh -c "nova-manage db sync" nova
Start the services:
# systemctl enable openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
# systemctl start openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
Install the packages (on the compute node):
# yum install openstack-nova-compute sysfsutils
Edit the configuration file: vim /etc/nova/nova.conf
[DEFAULT]
verbose = True
rpc_backend = rabbit
rabbit_host = controller
rabbit_password = P@ssw0rd
auth_strategy = keystone
my_ip = 10.0.0.2   [management network IP address]
vnc_enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.0.0.2   [management network IP address]
novncproxy_base_url = http://controller:6080/vnc_auto.html
[keystone_authtoken]
auth_uri = http://controller:5000/v2.0
identity_uri = http://controller:35357
admin_tenant_name = service
admin_user = nova
admin_password = P@ssw0rd
[glance]
host = controller
Complete the installation.
Determine whether your compute node supports hardware acceleration for virtual machines:
# egrep -c '(vmx|svm)' /proc/cpuinfo
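If this command returns 0, the CPU exposes no VT-x/AMD-V extensions (common when the compute node is itself a VM) and KVM will not work; in that case configure libvirt to fall back to plain QEMU:
# vim /etc/nova/nova.conf
[libvirt]
virt_type = qemu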
Start the Compute service and its dependencies, and configure them to start at boot:
# systemctl enable libvirtd.service openstack-nova-compute.service
# systemctl start libvirtd.service openstack-nova-compute.service
# source admin-openrc.sh
# nova service-list
# nova image-list
Create the database:
# mysql -uroot -pP@ssw0rd
MariaDB [(none)]> CREATE DATABASE neutron;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'P@ssw0rd';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'P@ssw0rd';
Source the admin credentials to run admin-only commands:
# source admin-openrc.sh
Create the service credentials:
# keystone user-create --name neutron --pass P@ssw0rd
# keystone user-role-add --user neutron --tenant service --role admin
# keystone service-create --name neutron --type network --description "OpenStack Networking"
# keystone endpoint-create --service-id $(keystone service-list | awk '/ network / {print $2}') --publicurl http://controller:9696 --adminurl http://controller:9696 --internalurl http://controller:9696 --region regionOne
Install the networking components:
# yum install -y openstack-neutron openstack-neutron-ml2 python-neutronclient which
Edit the configuration file: vim /etc/neutron/neutron.conf
[DEFAULT]
verbose = True
rpc_backend = rabbit
rabbit_host = controller
rabbit_password = P@ssw0rd
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://controller:8774/v2
nova_admin_auth_url = http://controller:35357/v2.0
nova_region_name = regionOne
nova_admin_username = nova
nova_admin_tenant_id = SERVICE_TENANT_ID
[To obtain this value: # source admin-openrc.sh, then # keystone tenant-get service; see the sketch after this configuration block]
nova_admin_password = P@ssw0rd
[database]
connection = mysql://neutron:P@ssw0rd@controller/neutron
[keystone_authtoken]
auth_uri = http://controller:5000/v2.0
identity_uri = http://controller:35357
admin_tenant_name = service
admin_user = neutron
admin_password = P@ssw0rd
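The SERVICE_TENANT_ID placeholder in the [DEFAULT] section above must be replaced with the real ID of the service tenant. A sketch for extracting it in one line, assuming the admin credentials are sourced and keystone's usual table output:
# keystone tenant-get service | awk '/ id / {print $4}'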
Configure the Modular Layer 2 (ML2) plug-in.
Edit the configuration file: vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
Configure Compute to use Networking.
Edit the configuration file on the controller node: vim /etc/nova/nova.conf
[DEFAULT]
network_api_class = nova.network.neutronv2.api.API
security_group_api = neutron
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[neutron]
url = http://controller:9696
auth_strategy = keystone
admin_auth_url = http://controller:35357/v2.0
admin_tenant_name = service
admin_username = neutron
admin_password = P@ssw0rd
Complete the installation.
Create the symlink:
# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
Sync the database:
# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade kilo" neutron
Restart the Compute services:
# systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service
Start the Networking service and configure it to start at boot:
# systemctl enable neutron-server.service
# systemctl start neutron-server.service
Verify.
Source the admin credentials to run admin-only commands:
# source admin-openrc.sh
List the loaded extensions to verify that a neutron-server process started successfully:
# neutron ext-list
Preparation before configuration.
Edit the configuration file vim /etc/sysctl.conf to include the following parameters:
net.ipv4.ip_forward=1
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
Apply the changes:
# sysctl -p
Install the networking components:
# yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch
Configure the common networking components.
Edit the configuration file: vim /etc/neutron/neutron.conf
[DEFAULT]
verbose = True
rpc_backend = rabbit
rabbit_host = controller
rabbit_password = P@ssw0rd
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
[keystone_authtoken]
auth_uri = http://controller:5000/v2.0
identity_uri = http://controller:35357
admin_tenant_name = service
admin_user = neutron
admin_password = P@ssw0rd
Configure the Modular Layer 2 (ML2) plug-in.
Edit the configuration file: vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_flat]
flat_networks = external
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[ovs]
local_ip = 10.0.0.11
###########################################################
local_ip = INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS
[the tunnel-network IP of this node; if there is no separate tunnel/management segment, use the controller node IP]
###########################################################
enable_tunneling = True
bridge_mappings = external:br-ex
[agent]
tunnel_types = gre
Configure the Layer-3 (L3) agent.
Edit the configuration file /etc/neutron/l3_agent.ini:
[DEFAULT]
verbose = True
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
use_namespaces = True
external_network_bridge = br-ex
router_delete_namespaces = True
Configure the DHCP agent.
Edit the configuration file: vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
verbose = True
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
use_namespaces = True
dhcp_delete_namespaces = True
##############################################################################
Note: some cloud images ignore the DHCP MTU option; in that case, set the MTU through the metadata service or force it via dnsmasq as follows:
# vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
Create and edit the file /etc/neutron/dnsmasq-neutron.conf (option 26 is the interface MTU; 1454 leaves headroom for GRE encapsulation):
dhcp-option-force=26,1454
Kill any existing dnsmasq processes:
# pkill dnsmasq
##############################################################################
Configure the metadata agent.
Edit the configuration file /etc/neutron/metadata_agent.ini:
[DEFAULT]
auth_url = http://controller:5000/v2.0
auth_region = regionOne
admin_tenant_name = service
admin_user = neutron
admin_password = P@ssw0rd
nova_metadata_ip = 10.0.0.11   [the controller's IP]
metadata_proxy_shared_secret = P@ssw0rd
On the controller node, edit the configuration file vim /etc/nova/nova.conf:
[neutron]
service_metadata_proxy = True
metadata_proxy_shared_secret = P@ssw0rd   [must match the value in metadata_agent.ini]
# systemctl restart openstack-nova-api.service
Configure the Open vSwitch (OVS) service.
# systemctl enable openvswitch.service
# systemctl start openvswitch.service
# ovs-vsctl add-br br-ex
# ovs-vsctl add-port br-ex eth1
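To check that the bridge and port were created correctly:
# ovs-vsctl show
The output should list br-ex with an eth1 port attached.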
Complete the installation.
# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
# cp /usr/lib/systemd/system/neutron-openvswitch-agent.service /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig
# sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
# systemctl enable neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-ovs-cleanup.service
# systemctl start neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
Do not start the neutron-ovs-cleanup service directly.
Verify on the controller node:
# source admin-openrc.sh
# neutron agent-list
If agents are missing here, the database sync on the controller may have been run with the wrong release name; rerun the sync command with juno changed to kilo.
Preparation (on the compute node):
# vim /etc/sysctl.conf
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
# sysctl -p
Install the networking components:
# yum install openstack-neutron-ml2 openstack-neutron-openvswitch
Configure the common networking components.
# vim /etc/neutron/neutron.conf
[DEFAULT]
rpc_backend = rabbit
rabbit_host = controller
rabbit_password = P@ssw0rd
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
verbose = True
[keystone_authtoken]
auth_uri = http://controller:5000/v2.0
identity_uri = http://controller:35357
admin_tenant_name = service
admin_user = neutron
admin_password = P@ssw0rd
Configure the Modular Layer 2 (ML2) plug-in.
# vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[ovs]
local_ip = 10.0.0.2
###########################################################
local_ip = INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS
[the IP address of the management network interface on the compute node]
###########################################################
enable_tunneling = True
[agent]
tunnel_types = gre
Configure the Open vSwitch (OVS) service.
Start the OVS service and configure it to start at boot:
# systemctl enable openvswitch.service
# systemctl start openvswitch.service
Configure Compute to use Networking.
# vim /etc/nova/nova.conf
[DEFAULT]
network_api_class = nova.network.neutronv2.api.API
security_group_api = neutron
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[neutron]
url = http://controller:9696
auth_strategy = keystone
admin_auth_url = http://controller:35357/v2.0
admin_tenant_name = service
admin_username = neutron
admin_password = P@ssw0rd
Complete the installation.
# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
# cp /usr/lib/systemd/system/neutron-openvswitch-agent.service /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig
# sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
# systemctl restart openstack-nova-compute.service
# systemctl enable neutron-openvswitch-agent.service
# systemctl start neutron-openvswitch-agent.service
Verify operation.
Run on the controller node:
# source admin-openrc.sh
# neutron agent-list
This can also be done through the Dashboard.
Alternative: legacy networking (nova-network). On the controller node:
# vim /etc/nova/nova.conf
[DEFAULT]
network_api_class = nova.network.api.API
security_group_api = nova
# systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service
On the compute node:
# yum install openstack-nova-network openstack-nova-api
# vi /etc/nova/nova.conf
[DEFAULT]
network_api_class = nova.network.api.API
security_group_api = nova
firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver
network_manager = nova.network.manager.FlatDHCPManager
network_size = 254
allow_same_net_traffic = False
multi_host = True
send_arp_for_ha = True
share_dhcp_address = True
force_dhcp_release = True
flat_network_bridge = br100
flat_interface = INTERFACE_NAME
public_interface = INTERFACE_NAME
# systemctl enable openstack-nova-network.service openstack-nova-metadata-api.service
# systemctl start openstack-nova-network.service openstack-nova-metadata-api.service
# source admin-openrc.sh
# nova network-create demo-net --bridge br100 --multi-host T --fixed-range-v4 10.0.1.0/24
# nova net-list
# yum install openstack-dashboard httpd mod_wsgi memcached python-memcached
# vim /etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "controller"
ALLOWED_HOSTS = ['*']
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}
Due to a packaging bug, the dashboard CSS can fail to load. Run the following command to work around it:
# chown -R apache:apache /usr/share/openstack-dashboard/static
Start the services:
# systemctl enable httpd.service memcached.service
# systemctl start httpd.service memcached.service
Then access the dashboard at http://controller/dashboard
Create the database:
# mysql -uroot -pP@ssw0rd
MariaDB [(none)]> CREATE DATABASE cinder;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'P@ssw0rd';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'P@ssw0rd';
# source admin-openrc.sh
Create the cinder user:
# keystone user-create --name cinder --pass P@ssw0rd
# keystone user-role-add --user cinder --tenant service --role admin
# keystone service-create --name cinderv2 --type volumev2 --description "OpenStack Block Storage"
# keystone endpoint-create --service-id $(keystone service-list | awk '/ volumev2 / {print $2}') --publicurl http://controller:8776/v2/%\(tenant_id\)s --internalurl http://controller:8776/v2/%\(tenant_id\)s --adminurl http://controller:8776/v2/%\(tenant_id\)s --region regionOne
Install and configure the Block Storage service components on the controller node:
# yum install openstack-cinder python-cinderclient python-oslo-db
# vim /etc/cinder/cinder.conf
[database]
connection = mysql://cinder:P@ssw0rd@controller/cinder
[DEFAULT]
rpc_backend = rabbit
rabbit_host = controller
rabbit_password = P@ssw0rd
auth_strategy = keystone
my_ip = 10.0.0.11
verbose = True
[keystone_authtoken]
auth_uri = http://controller:5000/v2.0
identity_uri = http://controller:35357
admin_tenant_name = service
admin_user = cinder
admin_password = P@ssw0rd
# su -s /bin/sh -c "cinder-manage db sync" cinder
# systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
# systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
Preparation (on the storage node):
# yum install lvm2
# systemctl enable lvm2-lvmetad.service
# systemctl start lvm2-lvmetad.service
# pvcreate /dev/sdb1
# vgcreate cinder-volumes /dev/sdb1
# vim /etc/lvm/lvm.conf
devices {
    filter = [ "a/sdb/", "r/.*/" ]
}
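If the operating system disk of this node also sits on LVM (an assumption; adjust device names to your layout), it must be accepted by the filter as well, or the root volume group will no longer be scanned; a sketch:
devices {
    filter = [ "a/sda/", "a/sdb/", "r/.*/" ]
}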
Install and configure the Block Storage volume components:
# yum install openstack-cinder targetcli python-oslo-db MySQL-python
# vim /etc/cinder/cinder.conf
[database]
connection = mysql://cinder:P@ssw0rd@controller/cinder
[DEFAULT]
rpc_backend = rabbit
rabbit_host = controller
rabbit_password = P@ssw0rd
auth_strategy = keystone
my_ip = 10.0.0.2
#################################################
my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS
[the management network IP address of the storage node]
#################################################
glance_host = controller
iscsi_helper = lioadm
verbose = True
[keystone_authtoken]
auth_uri = http://controller:5000/v2.0
identity_uri = http://controller:35357
admin_tenant_name = service
admin_user = cinder
admin_password = P@ssw0rd
Start the services:
# systemctl enable openstack-cinder-volume.service target.service
# systemctl start openstack-cinder-volume.service target.service
Verify operation.
Run on the controller node:
# source admin-openrc.sh
# cinder service-list
Create a 1 GB volume:
# source demo-openrc.sh
# cinder create --display-name demo-volume1 1
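The volume should reach the available status shortly; verify with:
# cinder list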
Create the Identity service credentials for Swift:
# keystone user-create --name swift --pass P@ssw0rd
# keystone user-role-add --user swift --tenant service --role admin
# keystone service-create --name swift --type object-store --description "OpenStack Object Storage"
# keystone endpoint-create --service-id $(keystone service-list | awk '/ object-store / {print $2}') --publicurl 'http://controller:8080/v1/AUTH_%(tenant_id)s' --internalurl 'http://controller:8080/v1/AUTH_%(tenant_id)s' --adminurl http://controller:8080 --region regionOne
Configure on the controller node:
# yum install openstack-swift-proxy python-swiftclient python-keystone-auth-token python-keystonemiddleware memcached
Copy the sample file proxy-server.conf-sample into /etc/swift:
# vim /etc/swift/proxy-server.conf
[DEFAULT]
bind_port = 8080
user = swift
swift_dir = /etc/swift
[pipeline:main]
pipeline = authtoken cache healthcheck keystoneauth proxy-logging proxy-server
[app:proxy-server]
allow_account_management = true
account_autocreate = true
[filter:keystoneauth]
use = egg:swift#keystoneauth
operator_roles = admin,_member_
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
auth_uri = http://controller:5000/v2.0
identity_uri = http://controller:35357
admin_tenant_name = service
admin_user = swift
admin_password = P@ssw0rd
delay_auth_decision = true
[filter:cache]
memcache_servers = 127.0.0.1:11211
On the storage node, install the supporting packages and prepare the storage device:
# yum install xfsprogs rsync
# mkfs.xfs /dev/sda5
# vi /etc/fstab
/dev/sda5 /srv/node/sda5 xfs noatime,nodiratime,nobarrier,logbufs=8 0 2
# mkdir -p /srv/node/sda5
# mount /srv/node/sda5
# vim /etc/rsyncd.conf
uid = swift
gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = 10.0.0.2
[account]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/account.lock
[container]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/container.lock
[object]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/object.lock
# systemctl enable rsyncd.service
# systemctl start rsyncd.service
# yum install openstack-swift-account openstack-swift-container openstack-swift-object
# vim /etc/swift/account-server.conf
[DEFAULT]
bind_ip = 10.0.0.2   [the storage node's management address]
bind_port = 6002
user = swift
swift_dir = /etc/swift
devices = /srv/node
[pipeline:main]
pipeline = account-server
[The official manual says pipeline = healthcheck recon account-server, but that fails here]
[filter:recon]
recon_cache_path = /var/cache/swift
# vim /etc/swift/container-server.conf
[DEFAULT]
bind_ip = 10.0.0.2   [the storage node's management address]
bind_port = 6001
user = swift
swift_dir = /etc/swift
devices = /srv/node
[pipeline:main]
pipeline = container-server
[The official manual says pipeline = healthcheck recon container-server, but that fails here]
[filter:recon]
recon_cache_path = /var/cache/swift
# vim /etc/swift/object-server.conf
[DEFAULT]
bind_ip = 10.0.0.2   [the storage node's management address]
bind_port = 6000
user = swift
swift_dir = /etc/swift
devices = /srv/node
[pipeline:main]
pipeline = object-server
[The official manual says pipeline = healthcheck recon object-server, but that fails here]
[filter:recon]
recon_cache_path = /var/cache/swift
# chown -R swift:swift /srv/node
# mkdir -p /var/cache/swift
# chown -R swift:swift /var/cache/swift
Account ring:
# cd /etc/swift
# swift-ring-builder account.builder create 10 3 1
# swift-ring-builder account.builder add r1z1-10.0.0.2:6002/sda5 100
# swift-ring-builder account.builder
# swift-ring-builder account.builder rebalance
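For reference, the three numbers passed to create above (and reused for the container and object rings below) are:
# swift-ring-builder <builder-file> create <part_power> <replicas> <min_part_hours>
# part_power 10    -> the ring holds 2^10 = 1024 partitions
# replicas 3       -> each object is stored three times (with a single device, all replicas land on it)
# min_part_hours 1 -> a partition replica is not moved more than once per hour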
Container ring:
# cd /etc/swift
# swift-ring-builder container.builder create 10 3 1
# swift-ring-builder container.builder add r1z1-10.0.0.2:6001/sda5 100
# swift-ring-builder container.builder
# swift-ring-builder container.builder rebalance
Object ring:
# cd /etc/swift
# swift-ring-builder object.builder create 10 3 1
# swift-ring-builder object.builder add r1z1-10.0.0.2:6000/sda5 100
# swift-ring-builder object.builder
# swift-ring-builder object.builder rebalance
Distribute the ring configuration files.
On the controller node:
# vim /etc/swift/swift.conf
[swift-hash]
swift_hash_path_suffix = P@ssw0rd
swift_hash_path_prefix = P@ssw0rd
[storage-policy:0]
name = Policy-0
default = yes
Copy swift.conf to every storage node and any additional node running the proxy service.
On the controller node and any other nodes running the proxy service:
# systemctl enable openstack-swift-proxy.service memcached.service
# systemctl start openstack-swift-proxy.service memcached.service
On the storage nodes, start the Object Storage services and configure them to start at boot:
# systemctl enable openstack-swift-account.service openstack-swift-account-auditor.service openstack-swift-account-reaper.service openstack-swift-account-replicator.service
# systemctl start openstack-swift-account.service openstack-swift-account-auditor.service openstack-swift-account-reaper.service openstack-swift-account-replicator.service
# systemctl enable openstack-swift-container.service openstack-swift-container-auditor.service openstack-swift-container-replicator.service openstack-swift-container-updater.service
# systemctl start openstack-swift-container.service openstack-swift-container-auditor.service openstack-swift-container-replicator.service openstack-swift-container-updater.service
# systemctl enable openstack-swift-object.service openstack-swift-object-auditor.service openstack-swift-object-replicator.service openstack-swift-object-updater.service
# systemctl start openstack-swift-object.service openstack-swift-object-auditor.service openstack-swift-object-replicator.service openstack-swift-object-updater.service
# source demo-openrc.sh
# swift stat
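Beyond swift stat, a full round trip can be tested with the standard client commands (FILE stands for any local file you choose):
# swift upload demo-container1 FILE
# swift list
# swift download demo-container1 FILE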