硬件需求
操做系統:
Centos7
內核版本:
[root@controller ~]# uname -m x86_64 [root@controller ~]# uname -r 3.10.0-693.21.1.el7.x86_64
節點間以及網卡配置
controller節點
[root@controller ~]# ip a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ens6f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether ac:85:3d:bd:73:a0 brd ff:ff:ff:ff:ff:ff inet 10.71.11.12/24 brd 10.71.11.255 scope global ens6f0 valid_lft forever preferred_lft forever inet6 fe80::ffc8:8166:c284:eaa3/64 scope link valid_lft forever preferred_lft forever 3: ens6f1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether ac:85:3d:bd:73:a1 brd ff:ff:ff:ff:ff:ff 4: ens6f2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether ac:85:3d:bd:73:a2 brd ff:ff:ff:ff:ff:ff 5: ens6f3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether ac:85:3d:bd:73:a3 brd ff:ff:ff:ff:ff:ff 6: bond0: <BROADCAST,MULTICAST,MASTER> mtu 1500 qdisc noop state DOWN qlen 1000 link/ether 6e:28:d0:af:fe:b3 brd ff:ff:ff:ff:ff:ff
compute節點
[root@compute ~]# ip a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ens6f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:b4:0f:e0 brd ff:ff:ff:ff:ff:ff inet 10.71.11.13/24 brd 10.71.11.255 scope global ens6f0 valid_lft forever preferred_lft forever inet6 fe80::4e66:a096:a692:765d/64 scope link valid_lft forever preferred_lft forever 3: ens6f1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:b4:0f:e1 brd ff:ff:ff:ff:ff:ff 4: ens6f2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:b4:0f:e2 brd ff:ff:ff:ff:ff:ff 5: ens6f3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:b4:0f:e3 brd ff:ff:ff:ff:ff:ff 6: bond0: <BROADCAST,MULTICAST,MASTER> mtu 1500 qdisc noop state DOWN qlen 1000 link/ether 0e:1d:f6:3d:f3:40 brd ff:ff:ff:ff:ff:ff
存儲Cinder節點
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ens6f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:af:ae:29 brd ff:ff:ff:ff:ff:ff inet 10.71.11.14/24 brd 10.71.11.255 scope global ens6f0 valid_lft forever preferred_lft forever inet6 fe80::b358:ad47:b704:c86/64 scope link valid_lft forever preferred_lft forever 3: ens6f1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:af:ae:2a brd ff:ff:ff:ff:ff:ff 4: ens6f2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:af:ae:2b brd ff:ff:ff:ff:ff:ff 5: ens6f3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:af:ae:2c brd ff:ff:ff:ff:ff:ff 6: bond0: <BROADCAST,MULTICAST,MASTER> mtu 1500 qdisc noop state DOWN qlen 1000 link/ether 86:2e:1d:fc:9c:c7 brd ff:ff:ff:ff:ff:ff
說明:這次部署搭建採用三臺物理節點手動搭建社區OpenStack Queens環境
OpenStack項目是一個開源雲計算平臺,支持全部類型的雲環境。該項目旨在實現簡單,大規模的可擴展性和豐富的功能。
OpenStack經過各類補充服務提供基礎架構即服務(IaaS)解決方案。每項服務都提供了一個應用程序編程接口(API),以促進這種集成。
本文涵蓋了使用適用於具備足夠Linux經驗的OpenStack新用戶的功能性示例體系結構,逐步部署主要OpenStack服務。只用於學習OpenStack最小化環境。
1.概念性架構
下圖顯示了OpenStack服務之間的關係:
2.邏輯體系結構
下圖顯示了OpenStack雲中最多見但不是惟一可能的體系結構:
對於設計,部署和配置OpenStack,學習者必須瞭解邏輯體系結構。
如概念架構所示,OpenStack由幾個獨立的部分組成,稱爲OpenStack服務。全部服務都經過keystone服務進行身份驗證。
各個服務經過公共API相互交互,除非須要特權管理員命令。
在內部,OpenStack服務由多個進程組成。全部服務都至少有一個API進程,它監聽API請求,預處理它們並將它們傳遞給服務的其餘部分。除身份服務外,實際工做由不一樣的流程完成。
對於一個服務的進程之間的通訊,使用AMQP消息代理。該服務的狀態存儲在數據庫中。部署和配置OpenStack雲時,您能夠選擇多種消息代理和數據庫解決方案,例如RabbitMQ,MySQL,MariaDB和SQLite。
用戶能夠經過Horizon Dashboard實現的基於Web的用戶界面,經過命令行客戶端以及經過瀏覽器插件或curl等工具發佈API請求來訪問OpenStack。對於應用程序,有幾個SDK可用。最終,全部這些訪問方法都會對各類OpenStack服務發出REST API調用。
1.配置節點網卡IP(略)
2.設置主機名
hostnamectl set-hostname 主機名 bash ##使設置當即生效
3.配置域名解析,編輯/etc/hosts文件,加入以下配置
10.71.11.12 controller 10.71.11.13 compute 10.71.11.14 cinder
4.驗證網絡連通性
在控制節點執行
[root@controller ~]# ping -c 4 openstack.org PING openstack.org (162.242.140.107) 56(84) bytes of data. 64 bytes from 162.242.140.107 (162.242.140.107): icmp_seq=1 ttl=46 time=248 ms 64 bytes from 162.242.140.107 (162.242.140.107): icmp_seq=2 ttl=46 time=248 ms 64 bytes from 162.242.140.107 (162.242.140.107): icmp_seq=3 ttl=46 time=248 ms 64 bytes from 162.242.140.107 (162.242.140.107): icmp_seq=4 ttl=46 time=248 ms [root@controller ~]# ping -c 4 compute PING compute (10.71.11.13) 56(84) bytes of data. 64 bytes from compute (10.71.11.13): icmp_seq=1 ttl=64 time=0.395 ms 64 bytes from compute (10.71.11.13): icmp_seq=2 ttl=64 time=0.214 ms
在計算節點執行
[root@compute ~]# ping -c 4 openstack.org PING openstack.org (162.242.140.107) 56(84) bytes of data. 64 bytes from 162.242.140.107 (162.242.140.107): icmp_seq=1 ttl=46 time=249 ms 64 bytes from 162.242.140.107 (162.242.140.107): icmp_seq=2 ttl=46 time=248 ms [root@compute ~]# ping -c 4 controller PING controller (10.71.11.12) 56(84) bytes of data. 64 bytes from controller (10.71.11.12): icmp_seq=1 ttl=64 time=0.237 ms
5.配置阿里yum源
備份
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
下載
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
或者
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
6.安裝NTP時鐘服務(全部節點)
##controller節點##
安裝軟件包
yum install chrony -y
編輯/etc/chrony.conf文件,配置時鐘源同步服務端
server controller iburst ##全部節點向controller節點同步時間 allow 10.71.11.0/24 ##設置時間同步網段
設置NTP服務開機啓動
systemctl enable chronyd.service systemctl start chronyd.service
其餘節點
安裝軟件包
yum install chrony -y
配置全部節點指向controller同步時間
vi /etc/chrony.conf server controller iburst
重啓NTP服務(略)
驗證時鐘同步服務
在controller節點執行
[root@controller ~]# chronyc sources 210 Number of sources = 4 MS Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== ^* time4.aliyun.com 2 10 377 1015 +115us[ +142us] +/- 14ms ^- ntp8.flashdance.cx 2 10 347 428 +27ms[ +27ms] +/- 259ms ^- 85.199.214.101 1 10 377 988 +38ms[ +38ms] +/- 202ms ^- ntp7.flashdance.cx 2 10 367 836 +35ms[ +35ms] +/- 247ms MS列中的內容應該指明* NTP服務當前同步的服務器。
在其餘節點執行
[root@compute ~]# chronyc sources 210 Number of sources = 4 MS Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== ^* leontp.ccgs.wa.edu.au 1 10 377 752 +49ms[ +49ms] +/- 121ms ^+ ntp5.flashdance.cx 2 10 373 1155 +15ms[ +16ms] +/- 258ms ^+ 85.199.214.101 1 10 377 46m -22ms[ -21ms] +/- 164ms ^+ ntp8.flashdance.cx 2 10 333 900 -6333us[-5976us] +/- 257ms [root@cinder ~]# chronyc sources 210 Number of sources = 4 MS Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== ^+ 61-216-153-104.HINET-IP.> 3 10 377 748 -3373us[-3621us] +/- 87ms ^- 85.199.214.100 1 10 377 876 +37ms[ +36ms] +/- 191ms ^* 61-216-153-106.HINET-IP.> 3 10 377 869 +774us[ +527us] +/- 95ms ^- makaki.miuku.net 2 10 377 384 +30ms[ +30ms] +/- 254ms
注意:平常運維中常常碰見時鐘漂移問題,致使集羣服務腦裂
說明:無特殊說明,如下操做在全部節點上執行
1.下載安裝openstack軟件倉庫(queens版本)
yum install centos-release-openstack-queens -y
2.更新全部節點軟件包
yum upgrade
3.安裝openstack client端
yum install python-openstackclient -y
4.安裝openstack-selinux
yum install openstack-selinux -y
大多數OpenStack服務使用SQL數據庫來存儲信息,數據庫一般在控制器節點上運行。 本文主要使用MariaDB或MySQL。
安裝軟件包
yum install mariadb mariadb-server python2-PyMySQL -y
編輯/etc/my.cnf.d/mariadb-server.cnf並完成如下操做
[root@controller ~]# vi /etc/my.cnf.d/mariadb-server.cnf # # These groups are read by MariaDB server. # Use it for options that only the server (but not clients) should see # # See the examples of server my.cnf files in /usr/share/mysql/ # # this is read by the standalone daemon and embedded servers [server] # this is only for the mysqld standalone daemon # Settings user and group are ignored when systemd is used. # If you need to run mysqld under a different user or group, # customize your systemd unit file for mysqld/mariadb according to the # instructions in http://fedoraproject.org/wiki/Systemd [mysqld] datadir=/var/lib/mysql socket=/var/lib/mysql/mysql.sock log-error=/var/log/mariadb/mariadb.log pid-file=/var/run/mariadb/mariadb.pid bind-address = 10.71.11.12 default-storage-engine = innodb innodb_file_per_table = on max_connections = 4096 collation-server = utf8_general_ci character-set-server = utf8
說明:bind-address使用controller節點的管理IP
設置服務開機啓動
systemctl enable mariadb.service systemctl start mariadb.service
經過運行mysql_secure_installation腳本來保護數據庫服務。
[root@controller ~]# mysql_secure_installation NOTE: RUNNING ALL PARTS OF THIS SCRIPT IS RECOMMENDED FOR ALL MariaDB SERVERS IN PRODUCTION USE! PLEASE READ EACH STEP CAREFULLY! In order to log into MariaDB to secure it, we'll need the current password for the root user. If you've just installed MariaDB, and you haven't set the root password yet, the password will be blank, so you should just press enter here. Enter current password for root (enter for none): OK, successfully used password, moving on... Setting the root password ensures that nobody can log into the MariaDB root user without the proper authorisation. Set root password? [Y/n] New password: Re-enter new password: Password updated successfully! Reloading privilege tables.. ... Success! By default, a MariaDB installation has an anonymous user, allowing anyone to log into MariaDB without having to have a user account created for them. This is intended only for testing, and to make the installation go a bit smoother. You should remove them before moving into a production environment. Remove anonymous users? [Y/n] ... Success! Normally, root should only be allowed to connect from 'localhost'. This ensures that someone cannot guess at the root password from the network. Disallow root login remotely? [Y/n] ... Success! By default, MariaDB comes with a database named 'test' that anyone can access. This is also intended only for testing, and should be removed before moving into a production environment. Remove test database and access to it? [Y/n] - Dropping test database... ... Success! - Removing privileges on test database... ... Success! Reloading the privilege tables will ensure that all changes made so far will take effect immediately. Reload privilege tables now? [Y/n] ... Success! Cleaning up... All done! If you've completed all of the above steps, your MariaDB installation should now be secure. Thanks for using MariaDB!
1.安裝配置消息隊列組件
yum install rabbitmq-server -y
2.設置服務開機啓動
systemctl enable rabbitmq-server.service;systemctl start rabbitmq-server.service
3.添加openstack 用戶
rabbitmqctl add_user openstack 123456
4.openstack用戶的權限配置
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
說明:各服務的身份認證機制使用Memcached緩存令牌。 memcached服務一般在控制器節點上運行。 對於生產部署,咱們建議啓用防火牆,身份驗證和加密的組合來保護它。
1.安裝配置組件
yum install memcached python-memcached -y
2.編輯/etc/sysconfig/memcached
vi /etc/sysconfig/memcached OPTIONS="-l 10.71.11.12,::1,controller"
3.設置服務開機啓動
systemctl enable memcached.service;systemctl start memcached.service
1.安裝服務
yum install etcd -y
2.編輯/etc/etcd/etcd.conf文件
vi /etc/etcd/etcd.conf ETCD_INITIAL_CLUSTER ETCD_INITIAL_ADVERTISE_PEER_URLS ETCD_ADVERTISE_CLIENT_URLS ETCD_LISTEN_CLIENT_URLS #[Member] ETCD_DATA_DIR="/var/lib/etcd/default.etcd" ETCD_LISTEN_PEER_URLS="http://10.71.11.12:2380" ETCD_LISTEN_CLIENT_URLS="http://10.71.11.12:2379" ETCD_NAME="controller" #[Clustering] ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.71.11.12:2380" ETCD_ADVERTISE_CLIENT_URLS="http://10.71.11.12:2379" ETCD_INITIAL_CLUSTER="controller=http://10.71.11.12:2380" ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01" ETCD_INITIAL_CLUSTER_STATE="new"
3.設置服務開機啓動
systemctl enable etcd;systemctl start etcd
1.建立keystone數據庫並受權
mysql -u root -p CREATE DATABASE keystone; GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '123456';
2.安裝、配置組件
yum install openstack-keystone httpd mod_wsgi -y
3.編輯 /etc/keystone/keystone.conf
[database] connection = mysql+pymysql://keystone:123456@controller/keystone [token] provider = fernet
4.同步keystone數據庫
su -s /bin/sh -c "keystone-manage db_sync" keystone
5.數據庫初始化
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
6.引導身份認證服務
keystone-manage bootstrap --bootstrap-password 123456 --bootstrap-admin-url http://controller:35357/v3/ --bootstrap-internal-url http://controller:5000/v3/ --bootstrap-public-url http://controller:5000/v3/ --bootstrap-region-id RegionOne
1.編輯/etc/httpd/conf/httpd.conf,配置ServerName參數
ServerName controller
2.建立 /usr/share/keystone/wsgi-keystone.conf連接文件
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
3.設置服務開機啓動
systemctl enable httpd.service;systemctl start httpd.service
啓動服務報錯
[root@controller ~]# systemctl start httpd.service Job for httpd.service failed because the control process exited with error code. See "systemctl status httpd.service" and "journalctl -xe" for details. [root@controller ~]# journalctl -xe Apr 01 02:31:03 controller systemd[1]: [/usr/lib/systemd/system/memcached.service:62] Unknown lvalue 'ProtectControlGroups' in section 'Service' Apr 01 02:31:03 controller systemd[1]: [/usr/lib/systemd/system/memcached.service:65] Unknown lvalue 'RestrictRealtime' in section 'Service' Apr 01 02:31:03 controller systemd[1]: [/usr/lib/systemd/system/memcached.service:72] Unknown lvalue 'RestrictNamespaces' in section 'Service' Apr 01 02:31:03 controller polkitd[928]: Unregistered Authentication Agent for unix-process:18932:9281785 (system bus name :1.157, object path /org/freedeskt Apr 01 02:31:09 controller polkitd[928]: Registered Authentication Agent for unix-process:18952:9282349 (system bus name :1.158 [/usr/bin/pkttyagent --notify Apr 01 02:31:09 controller systemd[1]: Starting The Apache HTTP Server... -- Subject: Unit httpd.service has begun start-up -- Defined-By: systemd -- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel -- -- Unit httpd.service has begun starting up. 
Apr 01 02:31:09 controller httpd[18958]: (13)Permission denied: AH00072: make_sock: could not bind to address [::]:5000 Apr 01 02:31:09 controller httpd[18958]: (13)Permission denied: AH00072: make_sock: could not bind to address 0.0.0.0:5000 Apr 01 02:31:09 controller httpd[18958]: no listening sockets available, shutting down Apr 01 02:31:09 controller httpd[18958]: AH00015: Unable to open logs Apr 01 02:31:09 controller systemd[1]: httpd.service: main process exited, code=exited, status=1/FAILURE Apr 01 02:31:09 controller kill[18960]: kill: cannot find process "" Apr 01 02:31:09 controller systemd[1]: httpd.service: control process exited, code=exited status=1 Apr 01 02:31:09 controller systemd[1]: Failed to start The Apache HTTP Server. -- Subject: Unit httpd.service has failed -- Defined-By: systemd -- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel -- -- Unit httpd.service has failed. -- -- The result is failed. Apr 01 02:31:09 controller systemd[1]: Unit httpd.service entered failed state. Apr 01 02:31:09 controller systemd[1]: httpd.service failed. Apr 01 02:31:09 controller polkitd[928]: Unregistered Authentication Agent for unix-process:18952:9282349 (system bus name :1.158, object path /org/freedeskt
通過判斷,是SELinux引起的問題
解決辦法:關閉SELinux(修改配置並重啓生效)
[root@controller ~]# vi /etc/selinux/config # This file controls the state of SELinux on the system. # SELINUX= can take one of these three values: # enforcing - SELinux security policy is enforced. # permissive - SELinux prints warnings instead of enforcing. # disabled - No SELinux policy is loaded. SELINUX=disabled # SELINUXTYPE= can take one of three two values: # targeted - Targeted processes are protected, # minimum - Modification of targeted policy. Only selected processes are protected. # mls - Multi Level Security protection. SELINUXTYPE=targeted
再次重啓服務報錯解決
[root@controller ~]# systemctl enable httpd.service;systemctl start httpd.service
4.配置administrative帳號
export OS_USERNAME=admin export OS_PASSWORD=123456 export OS_PROJECT_NAME=admin export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_DOMAIN_NAME=Default export OS_AUTH_URL=http://controller:35357/v3 export OS_IDENTITY_API_VERSION=3
1.建立域
openstack domain create --description "Domain" example [root@controller ~]# openstack domain create --description "Domain" example +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Domain | | enabled | True | | id | 199658b1d0234c3cb8785c944aa05780 | | name | example | | tags | [] | +-------------+----------------------------------+
openstack project create --domain default --description "Service Project" service [root@controller ~]# openstack project create --domain default --description "Service Project" service +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Service Project | | domain_id | default | | enabled | True | | id | 03e700ff43e44b29b97365bac6c7d723 | | is_domain | False | | name | service | | parent_id | default | | tags | [] | +-------------+----------------------------------+
3.建立平臺demo項目
openstack project create --domain default --description "Demo Project" demo [root@controller ~]# openstack project create --domain default --description "Demo Project" demo +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Demo Project | | domain_id | default | | enabled | True | | id | 61f8c9005ca84477b5bdbf485be1a546 | | is_domain | False | | name | demo | | parent_id | default | | tags | [] | +-------------+----------------------------------+
4.建立demo用戶
openstack user create --domain default --password-prompt demo [root@controller ~]# openstack user create --domain default --password-prompt demo User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | fa794c034a53472c827a94e6a6ad12c1 | | name | demo | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+
5.建立用戶角色
openstack role create user [root@controller ~]# openstack role create user +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | None | | id | 15ea413279a74770b79630b75932a596 | | name | user | +-----------+----------------------------------+
6.添加用戶角色到demo項目和用戶
openstack role add --project demo --user demo user
說明:此條命令執行成功後不返回參數
1.取消環境變量
unset OS_AUTH_URL OS_PASSWORD
2.admin用戶返回的認證token
[root@controller ~]# unset OS_AUTH_URL OS_PASSWORD [root@controller ~]# openstack --os-auth-url http://controller:35357/v3 \ > --os-project-domain-name Default --os-user-domain-name Default \ > --os-project-name admin --os-username admin token issue Password: +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | expires | 2018-04-01T07:45:18+0000 | | id | gAAAAABawH_-ke3POs9LLzpEEH3Wziuk6VlQmNZCtxlDovLaSmg_-dOOUSDWsF-gw9we4QvcHzdO5Ahc3eEdDl6sIztZ60QQTG3x5Kbt_75EbWCZsBa2HkybZ-nJYuN4o3tQugse2BDcs8HF7bT1pAtoW0UM29RQNlCMdvx9jfcIT4EBit1SMKM | | project_id | 4205b649750d4ea68ff5bea73de0faae | | user_id | 475b31138acc4cc5bb42ca64af418963 | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
3.demo用戶返回的認證token
[root@controller ~]# openstack --os-auth-url http://controller:5000/v3 \ > --os-project-domain-name Default --os-user-domain-name Default \ > --os-project-name demo --os-username demo token issue Password: +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | expires | 2018-04-01T07:45:58+0000 | | id | gAAAAABawIAmwGuiyDMjhqTmkwgDi0hKyj55WCDaMdPvyr4H8ZJbBNt7cUTtQ2AEHdP8Z_PRB4RI0uiJIvtOoMI0DUmMrKsmZU5G95tKY4y-kXPvvqdd8_JdUvQN4MgCStb-ZZ3OpNwN6500C891M8DTA6W1pWR8julBNaFrEQdlllhreOfdLc4 | | project_id | 61f8c9005ca84477b5bdbf485be1a546 | | user_id | fa794c034a53472c827a94e6a6ad12c1 | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
1.建立admin-openrc腳本
export OS_PROJECT_DOMAIN_NAME=Default export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_NAME=admin export OS_USERNAME=admin export OS_PASSWORD=123456 export OS_AUTH_URL=http://controller:5000/v3 export OS_IDENTITY_API_VERSION=3 export OS_IMAGE_API_VERSION=2
2.建立demo-openrc腳本
export OS_PROJECT_DOMAIN_NAME=Default export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_NAME=demo export OS_USERNAME=demo export OS_PASSWORD=123456 export OS_AUTH_URL=http://controller:5000/v3 export OS_IDENTITY_API_VERSION=3 export OS_IMAGE_API_VERSION=2
3.使用腳本,返回認證token
[root@controller ~]# openstack token issue +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | expires | 2018-04-01T08:17:29+0000 | | id | gAAAAABawIeJ0z-3R2ltY6ublCGqZX80AIi4tQUxqEpw0xvPsFP9BLV8ALNsB2B7bsVivGB14KvhUncdoRl_G2ng5BtzVKAfzHyB-OxwiXeqAttkpQsuLCDKRHd3l-K6wRdaDqfNm-D1QjhtFoxHOTotOcjtujBHF12uP49TjJtl1Rrd6uVDk0g | | project_id | 4205b649750d4ea68ff5bea73de0faae | | user_id | 475b31138acc4cc5bb42ca64af418963 | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
1.建立glance數據庫,並受權
mysql -u root -p CREATE DATABASE glance; GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '123456';
2.獲取admin用戶的環境變量,並建立服務認證
. admin-openrc
建立glance用戶
[root@controller ~]# openstack user create --domain default --password-prompt glance User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | dd2363d365624c998dfd788b13e1282b | | name | glance | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+
爲glance用戶在service項目中添加admin角色
openstack role add --project service --user glance admin
說明:此條命令執行成功後不返回參數
建立glance服務
[root@controller ~]# openstack service create --name glance --description "OpenStack Image" image +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Image | | enabled | True | | id | 5927e22c745449869ff75b193ed7d7c6 | | name | glance | | type | image | +-------------+----------------------------------+
3.建立鏡像服務API端點
[root@controller ~]# openstack endpoint create --region RegionOne image public http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 0822449bf80f4f6897be5e3240b6bfcc | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 5927e22c745449869ff75b193ed7d7c6 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne image internal http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | f18ae583441b4d118526571cdc204d8a | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 5927e22c745449869ff75b193ed7d7c6 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne image admin http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 79eadf7829274b1b9beb2bfb6be91992 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 5927e22c745449869ff75b193ed7d7c6 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+
1.安裝軟件包
yum install openstack-glance -y
2.編輯/etc/glance/glance-api.conf文件
[database] connection = mysql+pymysql://glance:123456@controller/glance [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = glance password = 123456 [paste_deploy] flavor = keystone [glance_store] stores = file,http default_store = file filesystem_store_datadir = /var/lib/glance/images/
3.編輯/etc/glance/glance-registry.conf
[database] connection = mysql+pymysql://glance:123456@controller/glance [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = glance password = 123456 [paste_deploy] flavor = keystone
4.同步鏡像服務數據庫
su -s /bin/sh -c "glance-manage db_sync" glance
systemctl enable openstack-glance-api.service openstack-glance-registry.service systemctl start openstack-glance-api.service openstack-glance-registry.service
使用CirrOS驗證Image服務的操做,這是一個小型Linux映像,可幫助您測試OpenStack部署。
有關如何下載和構建映像的更多信息,請參閱OpenStack虛擬機映像指南https://docs.openstack.org/image-guide/
有關如何管理映像的信息,請參閱OpenStack最終用戶指南https://docs.openstack.org/queens/user/
1.獲取admin用戶的環境變量,且下載鏡像
. admin-openrc wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
2.上傳鏡像
使用QCOW2磁盤格式,裸容器格式和公開可見性將鏡像上傳到Image服務,以便全部項目均可以訪問它:
[root@controller ~]# openstack image create "cirros" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public +------------------+------------------------------------------------------+ | Field | Value | +------------------+------------------------------------------------------+ | checksum | f8ab98ff5e73ebab884d80c9dc9c7290 | | container_format | bare | | created_at | 2018-04-01T08:00:05Z | | disk_format | qcow2 | | file | /v2/images/916faa2b-e292-46e0-bfe4-0f535069a1a0/file | | id | 916faa2b-e292-46e0-bfe4-0f535069a1a0 | | min_disk | 0 | | min_ram | 0 | | name | cirros | | owner | 4205b649750d4ea68ff5bea73de0faae | | protected | False | | schema | /v2/schemas/image | | size | 13267968 | | status | active | | tags | | | updated_at | 2018-04-01T08:00:06Z | | virtual_size | None | | visibility | public | +------------------+------------------------------------------------------+
3.查看上傳的鏡像
[root@controller ~]# openstack image list +--------------------------------------+--------+--------+ | ID | Name | Status | +--------------------------------------+--------+--------+ | 916faa2b-e292-46e0-bfe4-0f535069a1a0 | cirros | active | +--------------------------------------+--------+--------+
說明:glance具體配置選項:https://docs.openstack.org/glance/queens/configuration/index.html
1.建立nova_api, nova, nova_cell0數據庫
mysql -u root -p CREATE DATABASE nova_api; CREATE DATABASE nova; CREATE DATABASE nova_cell0;
數據庫登陸受權
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY '123456';
2.建立nova用戶
[root@controller ~]# . admin-openrc [root@controller ~]# openstack user create --domain default --password-prompt nova User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | 8e72103f5cc645669870a630ffb25065 | | name | nova | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+
3.爲nova用戶在service項目中添加admin角色
openstack role add --project service --user nova admin
4.建立nova服務端點
[root@controller ~]# openstack service create --name nova --description "OpenStack Compute" compute +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Compute | | enabled | True | | id | 9f8f8d8cb8e542b09694bee6016cc67c | | name | nova | | type | compute | +-------------+----------------------------------+
5.建立compute API 服務端點
[root@controller ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | cf260d5a56344c728840e2696f44f9bc | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 9f8f8d8cb8e542b09694bee6016cc67c | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+----------------------------------+ [root@controller ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | f308f29a78e04b888c7418e78c3d6a6d | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 9f8f8d8cb8e542b09694bee6016cc67c | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+----------------------------------+ [root@controller ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 022d96fa78de4b73b6212c09f13d05be | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 9f8f8d8cb8e542b09694bee6016cc67c | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+----------------------------------+
建立一個placement服務用戶
[root@controller ~]# openstack user create --domain default --password-prompt placement User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | fa239565fef14492ba18a649deaa6f3c | | name | placement | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+
6.添加placement用戶爲項目服務admin角色
openstack role add --project service --user placement admin
7.建立在服務目錄建立Placement API服務
[root@controller ~]# openstack service create --name placement --description "Placement API" placement +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Placement API | | enabled | True | | id | 32bb1968c08747ccb14f6e4a20cd509e | | name | placement | | type | placement | +-------------+----------------------------------+
8.建立Placement API服務端點
[root@controller ~]# openstack endpoint create --region RegionOne placement public http://controller:8778 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | b856962188484f4ba6fad500b26b00ee | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 32bb1968c08747ccb14f6e4a20cd509e | | service_name | placement | | service_type | placement | | url | http://controller:8778 | +--------------+----------------------------------+ [root@controller ~]# openstack endpoint create --region RegionOne placement internal http://controller:8778 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 62e5a3d82a994f048a8bb8ddd1adc959 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 32bb1968c08747ccb14f6e4a20cd509e | | service_name | placement | | service_type | placement | | url | http://controller:8778 | +--------------+----------------------------------+ [root@controller ~]# openstack endpoint create --region RegionOne placement admin http://controller:8778 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | f12f81ff7b72416aa5d035b8b8cc2605 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 32bb1968c08747ccb14f6e4a20cd509e | | service_name | placement | | service_type | placement | | url | http://controller:8778 | +--------------+----------------------------------+
1.安裝軟件包
yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api
2.編輯 /etc/nova/nova.conf
[DEFAULT] enabled_apis = osapi_compute,metadata transport_url = rabbit://openstack:123456@controller my_ip = 10.71.11.12 use_neutron = True firewall_driver = nova.virt.firewall.NoopFirewallDriver [api_database] connection = mysql+pymysql://nova:123456@controller/nova_api [database] connection = mysql+pymysql://nova:123456@controller/nova [api] auth_strategy = keystone [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = nova password = 123456 [vnc] enabled = true server_listen = $my_ip server_proxyclient_address = $my_ip [glance] api_servers = http://controller:9292 [oslo_concurrency] lock_path = /var/lib/nova/tmp [placement] os_region_name = RegionOne project_domain_name = Default project_name = service auth_type = password user_domain_name = Default auth_url = http://controller:35357/v3 username = placement password = 123456
3.因爲軟件包的一個bug,須要在/etc/httpd/conf.d/00-nova-placement-api.conf文件中添加以下配置
<Directory /usr/bin> <IfVersion >= 2.4> Require all granted </IfVersion> <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> </Directory>
4.從新啓動http服務
systemctl restart httpd
5.同步nova-api數據庫
su -s /bin/sh -c "nova-manage api_db sync" nova
同步數據庫報錯
[root@controller ~]# su -s /bin/sh -c "nova-manage api_db sync" nova Traceback (most recent call last): File "/usr/bin/nova-manage", line 10, in <module> sys.exit(main()) File "/usr/lib/python2.7/site-packages/nova/cmd/manage.py", line 1597, in main config.parse_args(sys.argv) File "/usr/lib/python2.7/site-packages/nova/config.py", line 52, in parse_args default_config_files=default_config_files) File "/usr/lib/python2.7/site-packages/oslo_config/cfg.py", line 2502, in __call__ else sys.argv[1:]) File "/usr/lib/python2.7/site-packages/oslo_config/cfg.py", line 3166, in _parse_cli_opts return self._parse_config_files() File "/usr/lib/python2.7/site-packages/oslo_config/cfg.py", line 3183, in _parse_config_files ConfigParser._parse_file(config_file, namespace) File "/usr/lib/python2.7/site-packages/oslo_config/cfg.py", line 1950, in _parse_file raise ConfigFileParseError(pe.filename, str(pe)) oslo_config.cfg.ConfigFileParseError: Failed to parse /etc/nova/nova.conf: at /etc/nova/nova.conf:8, No ':' or '=' found in assignment: '/etc/nova/nova.conf'
根據報錯,把/etc/nova/nova.conf中第八行註釋掉,解決報錯
[root@controller ~]# su -s /bin/sh -c "nova-manage api_db sync" nova /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning
6.註冊cell0數據庫
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova [root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning
7.建立cell1 cell
[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning 6c689e8c-3e13-4e6d-974c-c2e4e22e510b
8.同步nova數據庫
[root@controller ~]# su -s /bin/sh -c "nova-manage db sync" nova /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning /usr/lib/python2.7/site-packages/pymysql/cursors.py:165: Warning: (1831, u'Duplicate index `block_device_mapping_instance_uuid_virtual_name_device_name_idx`. This is deprecated and will be disallowed in a future release.') result = self._query(query) /usr/lib/python2.7/site-packages/pymysql/cursors.py:165: Warning: (1831, u'Duplicate index `uniq_instances0uuid`. This is deprecated and will be disallowed in a future release.') result = self._query(query)
9.驗證 nova、 cell0、 cell1數據庫是否註冊正確
[root@controller ~]# nova-manage cell_v2 list_cells /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning +-------+--------------------------------------+------------------------------------+-------------------------------------------------+ | Name | UUID | Transport URL | Database Connection | +-------+--------------------------------------+------------------------------------+-------------------------------------------------+ | cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller/nova_cell0 | | cell1 | 6c689e8c-3e13-4e6d-974c-c2e4e22e510b | rabbit://openstack:****@controller | mysql+pymysql://nova:****@controller/nova | +-------+--------------------------------------+------------------------------------+-------------------------------------------------+
10.設置服務爲開機啓動
systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
1.安裝軟件包
yum install openstack-nova-compute
2.編輯/etc/nova/nova.conf
[DEFAULT] enabled_apis = osapi_compute,metadata transport_url = rabbit://openstack:123456@controller my_ip = 10.71.11.13 use_neutron = True firewall_driver = nova.virt.firewall.NoopFirewallDriver [api] auth_strategy = keystone [keystone_authtoken] auth_uri = http://10.71.11.12:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = nova password = 123456 [vnc] enabled = True server_listen = 0.0.0.0 server_proxyclient_address = $my_ip novncproxy_base_url = http://controller:6080/vnc_auto.html [glance] api_servers = http://controller:9292 [oslo_concurrency] lock_path = /var/lib/nova/tmp [placement] os_region_name = RegionOne project_domain_name = Default project_name = service auth_type = password user_domain_name = Default auth_url = http://controller:35357/v3 username = placement password = 123456
3.設置服務開機啓動
systemctl enable libvirtd.service openstack-nova-compute.service systemctl start libvirtd.service openstack-nova-compute.service
說明:若是nova-compute服務沒法啓動,請檢查/var/log/nova/nova-compute.log,會出現以下報錯信息
2018-04-01 12:03:43.362 18612 INFO os_vif [-] Loaded VIF plugins: ovs, linux_bridge 2018-04-01 12:03:43.431 18612 WARNING oslo_config.cfg [-] Option "use_neutron" from group "DEFAULT" is deprecated for removal ( nova-network is deprecated, as are any related configuration options. ). Its value may be silently ignored in the future. 2018-04-01 12:03:43.609 18612 INFO nova.virt.driver [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] Loading compute driver 'libvirt.LibvirtDriver' 2018-04-01 12:03:43.825 18612 WARNING oslo_config.cfg [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] Option "firewall_driver" from group "DEFAULT" is deprecated for removal ( nova-network is deprecated, as are any related configuration options. ). Its value may be silently ignored in the future. 2018-04-01 12:03:43.832 18612 WARNING os_brick.initiator.connectors.remotefs [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] Connection details not present. RemoteFsClient may not initialize properly. 2018-04-01 12:03:43.938 18612 ERROR oslo.messaging._drivers.impl_rabbit [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] [683db769-0ab2-4e92-b19e-d2b711c8fadf] AMQP server on controller:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 1 seconds. Client port: None: error: [Errno 113] EHOSTUNREACH 2018-04-01 12:03:45.042 18612 ERROR oslo.messaging._drivers.impl_rabbit [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] [683db769-0ab2-4e92-b19e-d2b711c8fadf] AMQP server on controller:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 2 seconds. Client port: None: error: [Errno 113] EHOSTUNREACH 2018-04-01 12:03:47.140 18612 ERROR oslo.messaging._drivers.impl_rabbit [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] [683db769-0ab2-4e92-b19e-d2b711c8fadf] AMQP server on controller:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 4 seconds. 
Client port: None: error: [Errno 113] EHOSTUNREACH 2018-04-01 12:03:51.244 18612 ERROR oslo.messaging._drivers.impl_rabbit [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] [683db769-0ab2-4e92-b19e-d2b711c8fadf] AMQP server on controller:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 6 seconds. Client port: None: error: [Errno 113] EHOSTUNREACH 2018-04-01 12:03:57.351 18612 ERROR oslo.messaging._drivers.impl_rabbit [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] [683db769-0ab2-4e92-b19e-d2b711c8fadf] AMQP server on controller:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 8 seconds. Client port: None: error: [Errno 113] EHOSTUNREACH 2018-04-01 12:04:05.458 18612 ERROR oslo.messaging._drivers.impl_rabbit [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] [683db769-0ab2-4e92-b19e-d2b711c8fadf] AMQP server on controller:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 10 seconds. Client port: None: error: [Errno 113] EHOSTUNREACH @ "/var/log/nova/nova-compute.log" 947L, 240212C
控制器:5672上的錯誤消息AMQP服務器沒法訪問可能表示控制器節點上的防火牆阻止了對端口5672的訪問。配置防火牆以在控制器節點上打開端口5672,並在計算節點上從新啓動nova-compute服務。
清除controller的防火牆
[root@controller ~]# iptables -F [root@controller ~]# iptables -X [root@controller ~]# iptables -Z
重啓計算服務成功
4.添加compute節點到cell數據庫(controller)
驗證有幾個計算節點在數據庫中
[root@controller ~]# . admin-openrc [root@controller ~]# openstack compute service list --service nova-compute +----+--------------+---------+------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +----+--------------+---------+------+---------+-------+----------------------------+ | 8 | nova-compute | compute | nova | enabled | up | 2018-04-01T22:24:14.000000 | +----+--------------+---------+------+---------+-------+----------------------------+
5.發現計算節點
[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning Found 2 cell mappings. Skipping cell0 since it does not contain hosts. Getting compute nodes from cell 'cell1': 6c689e8c-3e13-4e6d-974c-c2e4e22e510b Found 1 unmapped computes in cell: 6c689e8c-3e13-4e6d-974c-c2e4e22e510b Checking host mapping for compute host 'compute': 32861a0d-894e-4af9-a57c-27662d27e6bd Creating host mapping for compute host 'compute': 32861a0d-894e-4af9-a57c-27662d27e6bd
1.列出服務組件
[root@controller ~]# . admin-openrc [root@controller ~]# openstack compute service list +----+------------------+----------------+----------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +----+------------------+----------------+----------+---------+-------+----------------------------+ | 1 | nova-consoleauth | controller | internal | enabled | up | 2018-04-01T22:25:29.000000 | | 2 | nova-conductor | controller | internal | enabled | up | 2018-04-01T22:25:33.000000 | | 3 | nova-scheduler | controller | internal | enabled | up | 2018-04-01T22:25:30.000000 | | 6 | nova-conductor | ansible-server | internal | enabled | up | 2018-04-01T22:25:55.000000 | | 7 | nova-scheduler | ansible-server | internal | enabled | up | 2018-04-01T22:25:59.000000 | | 8 | nova-compute | compute | nova | enabled | up | 2018-04-01T22:25:34.000000 | | 9 | nova-consoleauth | ansible-server | internal | enabled | up | 2018-04-01T22:25:57.000000 | +----+------------------+----------------+----------+---------+-------+----------------------------+
2.列出身份服務中的API端點以驗證與身份服務的鏈接:
[root@controller ~]# openstack catalog list +-----------+-----------+-----------------------------------------+ | Name | Type | Endpoints | +-----------+-----------+-----------------------------------------+ | placement | placement | RegionOne | | | | internal: http://controller:8778 | | | | RegionOne | | | | public: http://controller:8778 | | | | RegionOne | | | | admin: http://controller:8778 | | | | | | keystone | identity | RegionOne | | | | public: http://controller:5000/v3/ | | | | RegionOne | | | | admin: http://controller:35357/v3/ | | | | RegionOne | | | | internal: http://controller:5000/v3/ | | | | | | glance | image | RegionOne | | | | public: http://controller:9292 | | | | RegionOne | | | | admin: http://controller:9292 | | | | RegionOne | | | | internal: http://controller:9292 | | | | | | nova | compute | RegionOne | | | | admin: http://controller:8774/v2.1 | | | | RegionOne | | | | public: http://controller:8774/v2.1 | | | | RegionOne | | | | internal: http://controller:8774/v2.1 | | | | | +-----------+-----------+-----------------------------------------+
3.列出鏡像
[root@controller ~]# openstack image list
+--------------------------------------+--------+--------+
| ID | Name | Status |
+--------------------------------------+--------+--------+
| 916faa2b-e292-46e0-bfe4-0f535069a1a0 | cirros | active |
+--------------------------------------+--------+--------+
4.檢查cells和placement API是否正常
[root@controller ~]# nova-status upgrade check /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning Option "os_region_name" from group "placement" is deprecated. Use option "region-name" from group "placement". +---------------------------+ | Upgrade Check Results | +---------------------------+ | Check: Cells v2 | | Result: Success | | Details: None | +---------------------------+ | Check: Placement API | | Result: Success | | Details: None | +---------------------------+ | Check: Resource Providers | | Result: Success | | Details: None | +---------------------------+
nova知識點https://docs.openstack.org/nova/queens/admin/index.html
1.建立nuetron數據庫和受權
mysql -u root -p CREATE DATABASE neutron; GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '123456';
2.建立服務
. admin-openrc openstack user create --domain default --password-prompt neutron
添加admin角色爲neutron用戶
openstack role add --project service --user neutron admin
建立neutron服務
openstack service create --name neutron --description "OpenStack Networking" network
3.建立網絡服務端點
openstack endpoint create --region RegionOne network public http://controller:9696 openstack endpoint create --region RegionOne network internal http://controller:9696 openstack endpoint create --region RegionOne network admin http://controller:9696
1.安裝組件
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
2.配置服務組件,編輯 /etc/neutron/neutron.conf
[database] connection = mysql+pymysql://neutron:123456@controller/neutron [DEFAULT] auth_strategy = keystone core_plugin = ml2 service_plugins = transport_url = rabbit://openstack:123456@controller notify_nova_on_port_status_changes = true notify_nova_on_port_data_changes = true [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = 123456 [nova] auth_url = http://controller:35357 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = nova password = 123456 [oslo_concurrency] lock_path = /var/lib/neutron/tmp
編輯/etc/neutron/plugins/ml2/ml2_conf.ini
[ml2] type_drivers = flat,vlan tenant_network_types = mechanism_drivers = linuxbridge extension_drivers = port_security [ml2_type_flat] flat_networks = provider [securitygroup] enable_ipset = true
編輯 /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge] physical_interface_mappings = provider:ens6f0 [vxlan] enable_vxlan = false [securitygroup] enable_security_group = true firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
編輯 /etc/neutron/dhcp_agent.ini
[DEFAULT] interface_driver = linuxbridge dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq enable_isolated_metadata = true
編輯 /etc/neutron/metadata_agent.ini
[DEFAULT] nova_metadata_host = controller metadata_proxy_shared_secret = 123456
編輯/etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 123456
service_metadata_proxy = true
metadata_proxy_shared_secret = 123456
1.建立服務軟鏈接
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
2.同步數據庫
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
3.重啓compute API服務
systemctl restart openstack-nova-api.service
4.配置網絡服務開機啓動
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
1.安裝組件
yum install openstack-neutron-linuxbridge ebtables ipset
2.配置公共組件
編輯/etc/neutron/neutron.conf
[DEFAULT] auth_strategy = keystone transport_url = rabbit://openstack:123456@controller [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = 123456 [oslo_concurrency] lock_path = /var/lib/neutron/tmp
1.配置Linux網橋,編輯 /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge] physical_interface_mappings = provider:ens6f0 [vxlan] enable_vxlan = false [securitygroup] enable_security_group = true firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
編輯/etc/nova/nova.conf
[neutron] url = http://controller:9696 auth_url = http://controller:35357 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = neutron password = 123456
1.重啓compute服務
systemctl restart openstack-nova-compute.service
2.設置網橋服務開機啓動
systemctl enable neutron-linuxbridge-agent.service systemctl start neutron-linuxbridge-agent.service
1.安裝軟件包
yum install openstack-dashboard -y
編輯/etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "controller" ALLOWED_HOSTS = ['*']
配置memcache會話存儲
SESSION_ENGINE = 'django.contrib.sessions.backends.cache' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': 'controller:11211', } }
開啓身份認證API 版本v3
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
開啓domains版本支持
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
配置API版本
OPENSTACK_API_VERSIONS = { "identity": 3, "image": 2, "volume": 2, } OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default" OPENSTACK_NEUTRON_NETWORK = { 'enable_router': False, 'enable_quotas': False, 'enable_distributed_router': False, 'enable_ha_router': False, 'enable_lb': False, 'enable_firewall': False, 'enable_vpn': False, 'enable_fip_topology_check': False, }
2.完成安裝,重啓web服務和會話存儲
systemctl restart httpd.service memcached.service
在瀏覽器輸入http://10.71.11.12/dashboard,訪問openstack的web頁面
default admin 123456
本節介紹如何爲Block Storage服務安裝和配置存儲節點。 爲簡單起見,此配置使用空的本地塊存儲設備引用一個存儲節點。
該服務使用LVM驅動程序在該設備上配置邏輯卷,並經過iSCSI傳輸將其提供給實例。 您能夠按照這些說明進行小的修改,以便使用其餘存儲節點水平擴展您的環境。
1.安裝支持的軟件包
安裝LVM
yum install lvm2 device-mapper-persistent-data
設置LVM服務開機啓動
systemctl enable lvm2-lvmetad.service systemctl start lvm2-lvmetad.service
2.建立LVM物理邏輯卷/dev/sdb
[root@cinder ~]# pvcreate /dev/sdb1 Device /dev/sdb not found (or ignored by filtering).
解決方案:
編輯 vim /etc/lvm/lvm.conf,找到global_filter一行,配置以下
global_filter = [ "a|.*/|","a|sdb1|"]
以後再執行pvcreate命令,問題解決。
[root@cinder ~]# pvcreate /dev/sdb1 Physical volume "/dev/sdb1" successfully created.
3.建立cinder-volumes邏輯卷組
[root@cinder ~]# vgcreate cinder-volumes /dev/sdb1 Volume group "cinder-volumes" successfully created
4.安裝和配置組件
安裝軟件包
yum install openstack-cinder targetcli python-keystone -y
編輯/etc/cinder/cinder.conf
[DEFAULT] transport_url = rabbit://openstack:123456@controller auth_strategy = keystone my_ip = 10.71.11.14 enabled_backends = lvm glance_api_servers = http://controller:9292 [database] connection = mysql+pymysql://cinder:123456@controller/cinder [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_id = default user_domain_id = default project_name = service username = cinder password = 123456 在[lvm]部分中,使用LVM驅動程序,cinder-volumes卷組,iSCSI協議和相應的iSCSI服務配置LVM後端。 若是[lvm]部分不存在,請建立它: [lvm] volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver volume_group = cinder-volumes iscsi_protocol = iscsi iscsi_helper = lioadm [oslo_concurrency] lock_path = /var/lib/cinder/tmp
設置存儲服務開機啓動
systemctl enable openstack-cinder-volume.service target.service systemctl start openstack-cinder-volume.service target.service
[root@controller ~]# openstack image create --disk-format qcow2 --container-format bare --public --file /root/CentOS-7-x86_64-Minimal-1708.iso CentOS-7-x86_64
. admin-openrc openstack network create --share --external --provider-physical-network provider --provider-network-type flat provider
參數
--share 容許全部項目使用虛擬網絡
--external 定義外部虛擬網絡；若是須要建立內部網絡，使用 --internal
--provider-physical-network provider && --provider-network-type flat 鏈接flat 虛擬網絡
openstack subnet create --network provider --allocation-pool start=10.71.11.50,end=10.71.11.60 --dns-nameserver 114.114.114.114 --gateway 10.71.11.254 --subnet-range 10.71.11.0/24 provider
openstack flavor create --id 1 --vcpus 4 --ram 128 --disk 1 m2.nano
. demo-openrc ssh-keygen -q -N "" openstack keypair create --public-key ~/.ssh/id_rsa.pub liukey
openstack security group rule create --proto icmp default
openstack security group rule create --proto tcp --dst-port 22 default
openstack flavor list