OpenStack -- Deployment

##1. CentOS 7 Environment Preparation -- OpenStack Pike

 

[https://blog.csdn.net/shiyu1157758655/article/category/7063423]

 

##1. CentOS 7 Environment Preparation

#CentOS 7 x86_64

 

#Install base packages

yum -y install wget vim ntp net-tools tree openssh

 

#Switch to the Aliyun yum mirror

mv /etc/yum.repos.d/CentOS-Base.repo{,.bak}

wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

 

yum install centos-release-openstack-pike -y #install the OpenStack Pike repository

yum clean all && yum makecache #rebuild the yum cache

 

yum install python-openstackclient openstack-selinux python2-PyMySQL -y #OpenStack client

yum install openstack-utils -y #OpenStack utilities (openstack-config)

 

#Disable SELinux and the firewall

systemctl stop firewalld.service

systemctl disable firewalld.service

firewall-cmd --state

sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config


grep --color=auto '^SELINUX' /etc/selinux/config

setenforce 0

 

#Set the hostname

Host=controller.www.local

hostnamectl set-hostname $Host

# hostname $Host

# echo $Host>/etc/hostname

 

#Configure the NICs (VLAN, bonding, etc.) and plan the IP addresses
#The controller node needs an external-facing IP (an address on another subnet) to act as the VNC proxy

#Add /etc/hosts entries (on the controller as well)

echo "10.2.1.20   controller">>/etc/hosts

echo "10.2.1.21   computer01">>/etc/hosts

echo "10.2.1.22   computer02">>/etc/hosts

 

#Show this host's IP

ip add|sed -nr  's#^.*inet (.*)/24.*$#\1#gp'

 

#Time synchronization

/usr/sbin/ntpdate ntp6.aliyun.com

echo "*/3 * * * * /usr/sbin/ntpdate ntp6.aliyun.com  &> /dev/null" > /tmp/crontab

crontab /tmp/crontab

 

#Update and reboot

yum update -y  && reboot

 

yum install mariadb mariadb-server python2-PyMySQL -y
echo "#
[mysqld]
bind-address = 0.0.0.0
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
#">/etc/my.cnf.d/openstack.cnf
 
#Start the database service
systemctl enable mariadb.service
systemctl start mariadb.service
netstat -antp|grep mysqld
#mysql_secure_installation #interactive script to set the initial root password
#Database setup: create the databases and grant user privileges
#mysql -u root -p 
create database keystone;
create database nova;
create database cinder;
create database neutron;
create database nova_api;
create database glance;
 
Grant privileges:
grant all privileges on keystone.* to 'keystone'@'localhost' identified by 'keystone';
grant all privileges on keystone.* to 'keystone'@'%' identified by 'keystone';
grant all privileges on glance.* to 'glance'@'localhost' identified by 'glance';
grant all privileges on glance.* to 'glance'@'%' identified by 'glance';
grant all privileges on nova.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova.* to 'nova'@'%' identified by 'nova';
grant all privileges on nova_api.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova_api.* to 'nova'@'%' identified by 'nova';
create database nova_cell0;
grant all privileges on nova_cell0.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova_cell0.* to 'nova'@'%' identified by 'nova';
grant all privileges on neutron.* to 'neutron'@'localhost' identified by 'neutron';
grant all privileges on neutron.* to 'neutron'@'%' identified by 'neutron';
grant all privileges on cinder.* to 'cinder'@'localhost' identified by 'cinder';
grant all privileges on cinder.* to 'cinder'@'%' identified by 'cinder';
 
flush privileges;
select user,host from mysql.user;
show databases;
 
Verification: be sure to test every one of these, one by one.
[root@node01 ~ ]#mysql -u nova -pnova
show databases;
[root@node01 ~ ]#mysql -u glance -pglance
show databases;
[root@node01 ~ ]#mysql -u neutron -pneutron
show databases;
[root@node01 ~ ]#mysql -u cinder -pcinder
show databases;
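The statements above are typed at the mysql prompt; the same setup can also be scripted from the shell. A minimal sketch, assuming the MariaDB root password has not been set yet (otherwise add -p<password> to each mysql call):
for db in keystone glance nova nova_api nova_cell0 neutron cinder; do
    mysql -uroot -e "CREATE DATABASE IF NOT EXISTS ${db};"          # create each service database
done
for svc in keystone glance neutron cinder; do
    mysql -uroot -e "GRANT ALL PRIVILEGES ON ${svc}.* TO '${svc}'@'localhost' IDENTIFIED BY '${svc}';"
    mysql -uroot -e "GRANT ALL PRIVILEGES ON ${svc}.* TO '${svc}'@'%' IDENTIFIED BY '${svc}';"
done
for db in nova nova_api nova_cell0; do                              # the nova user owns all three nova databases
    mysql -uroot -e "GRANT ALL PRIVILEGES ON ${db}.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';"
    mysql -uroot -e "GRANT ALL PRIVILEGES ON ${db}.* TO 'nova'@'%' IDENTIFIED BY 'nova';"
done
mysql -uroot -e "FLUSH PRIVILEGES;"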
 
 
 
#RabbitMQ #message queue
yum -y install erlang socat
yum install -y rabbitmq-server
#Start rabbitmq (port 5672)
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
rabbitmq-plugins enable rabbitmq_management  #enable the web management plugin (port 15672)
#Add users and passwords
rabbitmqctl  add_user admin admin
rabbitmqctl  set_user_tags admin administrator
rabbitmqctl add_user openstack openstack 
rabbitmqctl set_permissions openstack ".*" ".*" ".*" 
rabbitmqctl  set_user_tags openstack administrator
systemctl restart rabbitmq-server.service
netstat -antp|grep '5672'
 
# rabbitmq-plugins list  #list available plugins
# lsof -i:15672
#Access the RabbitMQ web UI at http://ip:15672
#The default username and password are both guest; in the browser, add the openstack user to the administrator group and test logging in
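Before testing in the browser, the users and the openstack user's permissions can also be confirmed from the shell with standard rabbitmqctl subcommands:
rabbitmqctl list_users             # should list admin, openstack and the default guest user
rabbitmqctl list_permissions -p /  # openstack should have ".*" ".*" ".*" on the default vhost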

 

 

Make sure the two nodes can reach each other:

[root@node01 ~ ]#vi /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4

::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

nameserver 8.8.8.8

192.168.5.107 node01

192.168.5.106  node02

 

[root@node02 ~ ]#cat /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4

::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.5.106 node02

192.168.5.107 node01

 

 

[root@node01 ~ ]#mysql -u keystone -h 192.168.5.107 -pkeystone   #verify remote access works

show databases;

| Database           |

+--------------------+

| information_schema |

| keystone           |

 

 

#The databases and grants were created on the SQL server earlier
 
#Keystone installation
yum install -y openstack-keystone httpd mod_wsgi memcached python-memcached
yum install apr apr-util -y
#Start and configure memcached
cp /etc/sysconfig/memcached{,.bak}
systemctl enable memcached.service
systemctl start memcached.service
netstat -antp|grep 11211
 
#Keystone configuration
cp /etc/keystone/keystone.conf{,.bak}  #back up the default configuration
[root@node01 ~ ]#openssl rand -hex 10   #generate a random password
0309f3af5f912cd164be
[root@node01 ~ ]#echo "kestone 0309f3af5f912cd164be" >> ~/openstack.log
[root@node01 ~ ]#cat  /etc/keystone/keystone.conf
[DEFAULT]
admin_token = 0309f3af5f912cd164be
verbose = true
[database]
connection = mysql+pymysql://keystone:keystone@node01/keystone
[token]
provider = fernet
driver = memcache
[memcache]
servers = node01:11211
 
 
#Populate the Identity service database
su -s /bin/sh -c "keystone-manage db_sync" keystone
#Check that the tables were created
mysql -h node01 -ukeystone -pkeystone -e "use keystone;show tables;"
#Initialize the Fernet key repositories
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
#Set the admin (management) user and password
keystone-manage bootstrap --bootstrap-password admin \
  --bootstrap-admin-url http://192.168.5.107:35357/v3/ \
  --bootstrap-internal-url http://192.168.5.107:5000/v3/ \
  --bootstrap-public-url http://192.168.5.107:5000/v3/ \
  --bootstrap-region-id RegionOne
 
 
openstack endpoint create --region RegionOne \
  identity admin http://192.168.5.107:35357/v3
 
#Apache configuration
cp /etc/httpd/conf/httpd.conf{,.bak}
echo "ServerName node01" >>/etc/httpd/conf/httpd.conf   #set ServerName to node01
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
 
#Start Apache HTTP and enable it at boot
systemctl enable httpd.service
systemctl restart httpd.service
netstat -antp|egrep ':5000|:35357|:80'
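As an optional sanity check, the Identity API should answer on both ports with a version document (plain curl, no credentials needed):
curl -s http://192.168.5.107:5000/v3 | python -m json.tool
curl -s http://192.168.5.107:35357/v3 | python -m json.tool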
# systemctl disable
 
#Create the OpenStack client environment scripts
#admin environment script
echo "
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default 
export OS_PROJECT_NAME=admin 
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://192.168.5.107:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
">./admin-openstack.sh
#Test that the script works
source ./admin-openstack.sh
openstack token issue
 
 
#Create the service project; create the glance, nova, and neutron users and grant them the admin role
openstack project create --domain default --description "Service Project" service
openstack user create --domain default --password=glance glance
openstack role add --project service --user glance admin
openstack user create --domain default --password=nova nova
openstack role add --project service --user nova admin
openstack user create --domain default --password=neutron neutron
openstack role add --project service --user neutron admin
 
 
 
#Create the demo project (regular user, password, and role)
openstack project create --domain default --description "Demo Project" demo
openstack user create --domain default --password=demo demo
openstack role create user
openstack role add --project demo --user demo user
#demo environment script
echo "
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=demo
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
">./demo-openstack.sh
#Test that the script works
source ./demo-openstack.sh
openstack token issue

 

 

#The Glance database, user, and credentials were already set up earlier
 
# Register the service in keystone: create the glance service entity and the API endpoints (public, internal, admin)
source ./admin-openstack.sh || { echo "Load the admin-openstack.sh environment script created earlier";exit; }
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://192.168.5.107:9292
openstack endpoint create --region RegionOne image internal http://192.168.5.107:9292
openstack endpoint create --region RegionOne image admin http://192.168.5.107:9292
 
# Glance installation
yum install -y openstack-glance python-glance
#Configuration
cp /etc/glance/glance-api.conf{,.bak}
cp /etc/glance/glance-registry.conf{,.bak}
# images go to /var/lib/glance/images/ by default
[root@node01 ~ ]#mkdir -p /XLH_DATE/images
chown glance:nobody /XLH_DATE/images  
 
echo "#
[database]
connection = mysql+pymysql://glance:glance@192.168.5.107/glance
[keystone_authtoken]
auth_uri = http://192.168.5.107:5000/v3
auth_url = http://192.168.5.107:35357/v3
memcached_servers = 192.168.5.107:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /XLH_DATE/images
#">/etc/glance/glance-api.conf
#
echo "#
[database]
connection = mysql+pymysql://glance:glance@192.168.5.107/glance
[keystone_authtoken]
auth_uri = http://192.168.5.107:5000/v3
auth_url = http://192.168.5.107:35357/v3
memcached_servers = 192.168.5.107:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
#">/etc/glance/glance-registry.conf
 
#Sync the database and check it
su -s /bin/sh -c "glance-manage db_sync" glance
mysql -h 192.168.5.107 -u glance -pglance -e "use glance;show tables;"
 
#Start the services and enable them at boot
systemctl enable openstack-glance-api openstack-glance-registry
systemctl start openstack-glance-api openstack-glance-registry
#systemctl restart openstack-glance-api  openstack-glance-registry
netstat -antp|egrep '9292|9191' #check the service ports
 
#Image test (the download can sometimes be slow)
wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img #download the test image
#Upload the image to the Image service using the qcow2 disk format and bare container format, and make it public
source ./admin-openstack.sh
openstack image create "cirros" \
  --file cirros-0.3.5-x86_64-disk.img \
  --disk-format qcow2 --container-format bare \
  --public
 
#Check that the upload succeeded
openstack image list
#glance image-list
ls /XLH_DATE/images  #the uploaded image file lands in the datadir configured above
 
#To delete an image: glance image-delete <image id>

 

 

##5.1 Nova Controller Node
# controller installation
#
 
#The Nova database, user, and credentials were already set up earlier
source ./admin-openstack.sh || { echo "Load the admin-openstack.sh environment script created earlier";exit; }
 
# Register the service in keystone: create the nova user, service, and API endpoints
# the nova user was created earlier
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://192.168.5.107:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://192.168.5.107:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://192.168.5.107:8774/v2.1
#Create the placement user, service, and API endpoints
openstack user create --domain default --password=placement placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://192.168.5.107:8778
openstack endpoint create --region RegionOne placement internal http://192.168.5.107:8778
openstack endpoint create --region RegionOne placement admin http://192.168.5.107:8778
#openstack endpoint delete <id>   #delete an endpoint if needed
 
## Install the nova controller node
yum install -y openstack-nova-api openstack-nova-conductor \
  openstack-nova-console openstack-nova-novncproxy \
  openstack-nova-scheduler openstack-nova-placement-api
yum install -y openstack-utils
 
 
# nova controller node configuration

2. Configuration -- database connection

In /etc/nova/nova.conf, configure the database connections in the [api_database] and [database] sections:

The result of the change:

[root@linux-node1 ~]# grep -n '^[a-Z]' /etc/nova/nova.conf
2161:connection = mysql+pymysql://nova:nova@192.168.5.107/nova_api
3106:connection = mysql+pymysql://nova:nova@192.168.5.107/nova
[root@linux-node1 ~]# 

Sync the data into the MySQL databases; warnings here can be ignored.

[root@linux-node1 ~]#  su -s /bin/sh -c "nova-manage api_db sync" nova
[root@linux-node1 ~]# su -s /bin/sh -c "nova-manage db sync" nova


Check that the database tables were created:

[root@linux-node1 ~]# mysql -h192.168.5.107 -unova -pnova -e "use nova;show tables;"
[root@linux-node1 ~]# mysql -h192.168.5.107 -unova -pnova -e "use nova_api;show tables;"

3. Configuration -- keystone

In the [DEFAULT] and [keystone_authtoken] sections, configure Identity service access:

[DEFAULT]
...
auth_strategy = keystone

 

[keystone_authtoken]

...

auth_uri = http://192.168.5.107:5000

auth_url = http://192.168.5.107:35357

memcached_servers = 192.168.5.107:11211

auth_type = password

project_domain_name = default

user_domain_name = default

project_name = service

username = nova

password = nova



4. Configuration -- RabbitMQ

Configure RabbitMQ; the various nova components use it to communicate with each other.

In the [DEFAULT] and [oslo_messaging_rabbit] sections, configure RabbitMQ message queue access:

[DEFAULT]

...

rpc_backend = rabbit

 

[oslo_messaging_rabbit]

...

rabbit_host=192.168.5.107

rabbit_userid = openstack

rabbit_password = openstack



5. Configuration -- nova's own functional modules

In the [DEFAULT] section, enable only the compute and metadata APIs:

[DEFAULT]

...

enabled_apis = osapi_compute,metadata

 

The docs say to set my_ip; we do not set it here, because later options reference the my_ip variable. Configuring those options directly is a bit more work, but it makes clear exactly where this IP is used.

So my_ip is not configured below.

In the [DEFAULT] section, enable support for the Networking service:
By default, the Compute service uses its built-in firewall service. Since the Networking service includes a firewall service,
you must disable Compute's built-in firewall by using the nova.virt.firewall.NoopFirewallDriver firewall driver.

[DEFAULT]

...

use_neutron = True

firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver

In the [vnc] section, configure the VNC proxy to use the controller node's management interface IP (these default to $my_ip):

[vnc]

...

vncserver_listen =192.168.5.107

vncserver_proxyclient_address =192.168.5.107

In the [glance] section, configure the location of the Image service API:

[glance]

...

api_servers = http://192.168.5.107:9292



In the [oslo_concurrency] section, configure the lock path:

[oslo_concurrency]

...

lock_path =/var/lib/nova/tmp
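For reference, the edits in sections 2-5 can also be applied non-interactively with openstack-config (provided by the openstack-utils package installed earlier). This is only a sketch using the same values as above; the [placement] lines at the end are an assumption taken from the official Pike install guide (they are not shown in this walkthrough's grep output), included because nova-status upgrade check later expects placement credentials:
conf=/etc/nova/nova.conf
openstack-config --set $conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set $conf DEFAULT auth_strategy keystone
openstack-config --set $conf DEFAULT use_neutron True
openstack-config --set $conf DEFAULT firewall_driver nova.virt.libvirt.firewall.IptablesFirewallDriver
openstack-config --set $conf DEFAULT rpc_backend rabbit
openstack-config --set $conf api_database connection mysql+pymysql://nova:nova@192.168.5.107/nova_api
openstack-config --set $conf database connection mysql+pymysql://nova:nova@192.168.5.107/nova
openstack-config --set $conf keystone_authtoken auth_uri http://192.168.5.107:5000
openstack-config --set $conf keystone_authtoken auth_url http://192.168.5.107:35357
openstack-config --set $conf keystone_authtoken memcached_servers 192.168.5.107:11211
openstack-config --set $conf keystone_authtoken auth_type password
openstack-config --set $conf keystone_authtoken project_domain_name default
openstack-config --set $conf keystone_authtoken user_domain_name default
openstack-config --set $conf keystone_authtoken project_name service
openstack-config --set $conf keystone_authtoken username nova
openstack-config --set $conf keystone_authtoken password nova
openstack-config --set $conf oslo_messaging_rabbit rabbit_host 192.168.5.107
openstack-config --set $conf oslo_messaging_rabbit rabbit_userid openstack
openstack-config --set $conf oslo_messaging_rabbit rabbit_password openstack
openstack-config --set $conf vnc vncserver_listen 192.168.5.107
openstack-config --set $conf vnc vncserver_proxyclient_address 192.168.5.107
openstack-config --set $conf glance api_servers http://192.168.5.107:9292
openstack-config --set $conf oslo_concurrency lock_path /var/lib/nova/tmp
# assumed additions per the Pike install guide (a [placement] section pointing at the placement user created above):
openstack-config --set $conf placement os_region_name RegionOne
openstack-config --set $conf placement project_domain_name Default
openstack-config --set $conf placement user_domain_name Default
openstack-config --set $conf placement project_name service
openstack-config --set $conf placement auth_type password
openstack-config --set $conf placement username placement
openstack-config --set $conf placement password placement
openstack-config --set $conf placement auth_url http://192.168.5.107:35357/v3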

6. Check the configuration

Configuration complete; filter the active settings:
[root@linux-node1 ~]# grep -n '^[a-Z]' /etc/nova/nova.conf
267:enabled_apis=osapi_compute,metadata
382:auth_strategy = keystone
1561:firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
1684:use_neutron=True
2119:rpc_backend=rabbit
2161:connection = mysql+pymysql://nova:nova@192.168.5.107/nova_api
3106:connection = mysql+pymysql://nova:nova@192.168.5.107/nova
3323:api_servers = http://192.168.5.107:9292
3523:auth_uri = http://192.168.5.107:5000
3524:auth_url = http://192.168.5.107:35357
3525:memcached_servers = 192.168.5.107:11211
3526:auth_type = password
3527:project_domain_name = default
3528:user_domain_name = default
3529:project_name = service
3530:username = nova
3531:password = nova
4292:lock_path = /var/lib/nova/tmp
4403:rabbit_host=192.168.5.107
4404:rabbit_userid = openstack
4405:rabbit_password = openstack
4465:rabbit_port=5672
5359:vncserver_listen = 192.168.5.107
5360:vncserver_proxyclient_address = 192.168.5.107

 
# Append the following to the placement API vhost configuration
echo "#
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
">>/etc/httpd/conf.d/00-nova-placement-api.conf
systemctl restart httpd
sleep 2
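An optional check: once httpd has been restarted, the Placement API should answer on port 8778 with a version document:
curl -s http://192.168.5.107:8778 | python -m json.tool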
 
#Sync the databases
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
 
#Check the data
nova-manage cell_v2 list_cells
mysql -h controller -u nova -pnova -e "use nova_api;show tables;"
mysql -h controller -u nova -pnova -e "use nova;show tables;" 
mysql -h controller -u nova -pnova -e "use nova_cell0;show tables;"
 
#Enable at boot
 systemctl enable openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service
#Start the services
systemctl start openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service
 
#View the nodes
#nova service-list 
openstack catalog list
nova-status upgrade check
openstack compute service list
 
#nova-manage cell_v2 delete_cell --cell_uuid  b736f4f4-2a67-4e60-952a-14b5a68b0f79
 

Install and configure nova on the compute node

nova-compute manages KVM through libvirt; the compute node is where the virtual machines actually run.

VMware supports nested virtual machines; other virtualization software does not.

The compute node machine must have VT-x enabled.

1. Install the packages

[root@linux-node2 ~]# yum install openstack-nova-compute -y
About novncproxy

novncproxy listens on port 6080; log in to the controller node and check:

[root@linux-node1 ~]# netstat -lntp|grep 6080
tcp        0      0 0.0.0.0:6080            0.0.0.0:*               LISTEN      931/python2 

2. Modify the compute node configuration file

The compute node's configuration is almost identical to the controller's, so you can copy the controller's file over and then adjust a few places:
1. The compute node does not configure database connections (it would still run with them left in, but that is not clean).
2. The compute node has one extra line in the [vnc] section.

Before copying, check the file permissions on the controller:

[root@linux-node1 ~]# ls /etc/nova/ -l
total 224
-rw-r----- 1 root nova   3673 Mar 22 18:14 api-paste.ini
-rw-r----- 1 root nova 184584 Jul 30 20:13 nova.conf
-rw-r----- 1 root nova  27914 Mar 22 18:14 policy.json
-rw-r--r-- 1 root root     72 May 24 06:43 release
-rw-r----- 1 root nova    966 Mar 22 18:13 rootwrap.conf

Copy the file over:

[root@linux-node1 ~]# scp -r /etc/nova/nova.conf 192.168.1.3:/etc/nova/

Check that the pushed file arrived with the correct permissions:
[root@linux-node2 ~]# ll /etc/nova/
total 224
-rw-r----- 1 root nova   3673 Jul 31 08:36 api-paste.ini
-rw-r----- 1 root nova 184584 Jul 31 08:36 nova.conf
-rw-r----- 1 root nova  27914 Jul 31 08:36 policy.json
-rw-r--r-- 1 root root     72 Jul 31 08:36 release
-rw-r----- 1 root nova    966 Jul 31 08:36 rootwrap.conf

Edit the configuration: delete the MySQL settings, or comment out the lines as shown.

This affects the [api_database] and [database] sections:
[api_database]
#connection = mysql+pymysql://nova:nova@192.168.1.2/nova_api

[database]
#connection = mysql+pymysql://nova:nova@192.168.1.2/nova

In the [vnc] section, enable and configure remote console access:

[vnc]
enabled=true
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 192.168.1.3
novncproxy_base_url = http://192.168.1.2:6080/vnc_auto.html

Modify the [libvirt] section

Before modifying it, determine whether your compute node supports hardware acceleration for virtual machines:
[root@linux-node2 ~]# egrep -c '(vmx|svm)' /proc/cpuinfo
1

If this command returns one or greater, your compute node supports hardware acceleration and needs no extra configuration.
If it returns zero, your compute node does not support hardware acceleration and you must configure libvirt to use QEMU instead of KVM.
Make the following edit in the [libvirt] section of /etc/nova/nova.conf (a conditional sketch follows below):
[libvirt]
virt_type=kvm
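The same decision can be scripted; a minimal sketch, assuming openstack-config (from openstack-utils) is also installed on the compute node:
if [ "$(egrep -c '(vmx|svm)' /proc/cpuinfo)" -ge 1 ]; then
    openstack-config --set /etc/nova/nova.conf libvirt virt_type kvm    # hardware acceleration available
else
    openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu   # no acceleration, fall back to QEMU
fi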

Review all the changed settings:
[root@linux-node2 ~]# grep -n '^[a-Z]'  /etc/nova/nova.conf 
267:enabled_apis=osapi_compute,metadata
382:auth_strategy=keystone
1561:firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
1684:use_neutron=true
2119:rpc_backend=rabbit
3323:api_servers=http://192.168.1.2:9292
3523:auth_uri=http://192.168.1.2:5000
3524:auth_url=http://192.168.1.2:35357
3525:memcached_servers=192.168.1.2:11211
3526:auth_type = password
3527:project_domain_name = default
3528:user_domain_name = default
3529:project_name = service
3530:username = nova
3531:password = nova
3682:virt_type=kvm
4292:lock_path=/var/lib/nova/tmp
4403:rabbit_host=192.168.1.2
4404:rabbit_userid = openstack
4405:rabbit_password = openstack
4465:rabbit_port=5672
5359:enabled=true
5360:vncserver_listen=0.0.0.0
5361:vncserver_proxyclient_address=192.168.1.3
5362:novncproxy_base_url = http://192.168.1.2:6080/vnc_auto.html



3. Start the services and check status

[root@linux-node2 ~]# systemctl enable libvirtd.service openstack-nova-compute.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-compute.service to /usr/lib/systemd/system/openstack-nova-compute.service.
[root@linux-node2 ~]# systemctl start libvirtd.service openstack-nova-compute.service

Check from the controller node: the compute node now appears, which means its services started correctly and the configuration is good.

[root@linux-node1 ~]# openstack host list



List the nova services from the controller. The update timestamps should be nearly identical; if they drift too far apart, instance creation may fail.

[root@linux-node1 ~]# nova service-list

The following command tests whether nova can reach glance:

[root@linux-node1 ~]# nova image-list

 
# Discover compute nodes; run this whenever a new compute node is added
#su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
 
##6.1 Neutron Controller Node -- OpenStack Pike

Network overview

Choose a networking option: provider (public) networks or self-service (private) networks.
The Networking service is deployed using one of these two architectures.
Provider networks: the simplest possible architecture; instances can only attach to provider (external) networks. There are no self-service (private) networks, routers, or floating IP addresses.
Only admin or other privileged users can manage provider networks.
Self-service networks: add layer-3 services on top of provider networks, so instances can also attach to self-service (private) networks.

This walkthrough uses provider (public) networks.

Install and configure Neutron on the controller node

1. Install the components on the controller node

[root@linux-node1 ~]# yum install openstack-neutron openstack-neutron-ml2   openstack-neutron-linuxbridge ebtables

2. Controller configuration -- database

Edit /etc/neutron/neutron.conf and complete the following.
In the [database] section, configure database access:

[database]
...
connection = mysql+pymysql://neutron:neutron@192.168.5.107/neutron

After changing neutron's database connection there is no need to sync the database yet; more configuration comes first.

3. Controller configuration -- keystone

In the [DEFAULT] and [keystone_authtoken] sections, configure Identity service access:

[DEFAULT]
...
auth_strategy = keystone

In the [keystone_authtoken] section, add the following parameters:

 

auth_uri = http://192.168.5.107:5000
auth_url = http://192.168.5.107:35357
memcached_servers = 192.168.5.107:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

4. Controller configuration -- RabbitMQ

In the [DEFAULT] and [oslo_messaging_rabbit] sections, configure the RabbitMQ message queue connection:
[DEFAULT]
...
rpc_backend = rabbit

Under the [oslo_messaging_rabbit] section:

[oslo_messaging_rabbit]

...

rabbit_host = 192.168.5.107

rabbit_userid = openstack

rabbit_password = openstack



5. Controller configuration -- Neutron core

In the [DEFAULT] section, enable the ML2 plug-in and disable other plug-ins. Leaving the value after the equals sign empty means all other service plug-ins are disabled.

[DEFAULT]
...
core_plugin = ml2
service_plugins =

6. Controller configuration -- integration with nova

In the [DEFAULT] and [nova] sections, configure the Networking service to notify Compute of network topology changes.
Uncomment these two lines;
they mean that nova is notified when a port's status or data changes.

[DEFAULT]
...
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True

Configure the [nova] section (neutron.conf has a [nova] section):

auth_url = http://192.168.5.107:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova



7. Controller configuration -- lock path

In the [oslo_concurrency] section, configure the lock path:

[oslo_concurrency]
...
lock_path = /var/lib/neutron/tmp


8. Check the main configuration file on the controller

The controller's main neutron configuration is now complete:

[root@linux-node1 ~]# grep -n '^[a-Z]' /etc/neutron/neutron.conf
2:auth_strategy = keystone
3:core_plugin = ml2
4:service_plugins =
5:notify_nova_on_port_status_changes = true
6:notify_nova_on_port_data_changes = true
515:rpc_backend = rabbit
658:connection = mysql+pymysql://neutron:neutron@192.168.5.107/neutron
767:auth_uri = http://192.168.5.107:5000
768:auth_url = http://192.168.5.107:35357
769:memcached_servers = 192.168.1.2:11211
770:auth_type = password
771:project_domain_name = default
772:user_domain_name = default
773:project_name = service
774:username = neutron
775:password = neutron
944:auth_url = http://192.168.5.107:35357
945:auth_type = password
946:project_domain_name = default
947:user_domain_name = default
948:region_name = RegionOne
949:project_name = service
950:username = nova
951:password = nova
1050:lock_path = /var/lib/neutron/tmp
1069:rabbit_host = 192.168.5.107

1070:rabbit_userid = openstack
1071:rabbit_password = openstack
1224:rabbit_port = 5672

9. Configure the Modular Layer 2 (ML2) plug-in on the controller

ML2 handles the layer-2 network configuration; the ML2 plug-in uses the Linuxbridge mechanism to build layer-2 virtual networking infrastructure for instances.
Edit /etc/neutron/plugins/ml2/ml2_conf.ini and complete the following.
In the [ml2] section, enable flat and VLAN networks:
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,gre,vxlan,geneve
tenant_network_types =


In the [ml2] section, enable the Linuxbridge mechanism.
This tells neutron which mechanism drivers to use to build networks -- here, linuxbridge.

[ml2]
...
mechanism_drivers = linuxbridge


It is a list, so you can specify more than one, for example adding openvswitch as well:

mechanism_drivers = linuxbridge,openvswitch


In the [ml2] section, enable the port security extension driver:

[ml2]
...
extension_drivers = port_security


In the [ml2_type_flat] section, configure the public virtual network as a flat network. The official docs use provider; here we use flat_networks = public.

[ml2_type_flat]

...

flat_networks = public

In the [securitygroup] section, enable ipset to make security group rules more efficient:

[securitygroup]

...

enable_ipset = True

 

10. Check the ML2 configuration file on the controller

The controller's ML2 configuration changes are now complete, as shown below:

[root@linux-node1 ~]# grep -n '^[a-Z]' /etc/neutron/plugins/ml2/ml2_conf.ini
107:type_drivers = flat,vlan,gre,vxlan,geneve
112:tenant_network_types = 
116:mechanism_drivers = linuxbridge,openvswitch
121:extension_drivers = port_security
153:flat_networks = public
215:enable_ipset = true

11. Configure the Linuxbridge agent on the controller

The Linuxbridge agent builds layer-2 virtual networking infrastructure for instances and handles security group rules.
Edit /etc/neutron/plugins/ml2/linuxbridge_agent.ini and complete the following.
In the [linux_bridge] section, map the public virtual network to the public physical network interface.
Replace PUBLIC_INTERFACE_NAME with the name of the underlying physical public interface (a quick way to identify it is shown after the mapping below).

[linux_bridge]

physical_interface_mappings = public:ens33
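One quick way to confirm the interface name before writing the mapping (assumption: the public network rides on the interface carrying the default route):
ip -o -4 route show to default | awk '{print $5}'    # interface of the default route, e.g. ens33
ip -o link show | awk -F': ' '{print $2}'            # all interface names on this host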

In the [vxlan] section, disable VXLAN overlay networks:

[vxlan]

enable_vxlan = False

In the [securitygroup] section, enable security groups and configure the Linuxbridge iptables firewall driver:

[securitygroup]

...

enable_security_group = True

firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver


Review the changed settings:
[root@linux-node1 ~]# grep -n '^[a-Z]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
128:physical_interface_mappings = public:ens33
156:enable_security_group = true
157:firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
165:enable_vxlan = false

12. Configure the DHCP agent on the controller

Edit /etc/neutron/dhcp_agent.ini and complete the following.
In the [DEFAULT] section, configure the Linuxbridge interface driver and the Dnsmasq DHCP driver, and enable isolated metadata so that instances on provider networks can reach metadata over the network:

[DEFAULT]

...

interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver

dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq

enable_isolated_metadata = True

 

Review the changed settings:

The first line configures the underlying interface driver.

The second line uses dnsmasq, a small open-source DHCP project.

The third line enables isolated metadata.

[root@linux-node1 ~]# grep -n '^[a-Z]' /etc/neutron/dhcp_agent.ini
2:interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
3:dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
4:enable_isolated_metadata = true


13. Configure the metadata agent on the controller

Edit /etc/neutron/metadata_agent.ini and complete the following.
In the [DEFAULT] section, configure the metadata host and the shared secret:

[DEFAULT]

...

nova_metadata_ip = controller

metadata_proxy_shared_secret = METADATA_SECRET

Replace METADATA_SECRET with the password you chose for the metadata proxy. Below, shi is the self-chosen shared secret.
The same shared secret must also be configured on the nova side, so keep the two in sync.
[root@linux-node1 ~]# grep -n '^[a-Z]'  /etc/neutron/metadata_agent.ini
2:nova_metadata_ip = 192.168.5.107

3:metadata_proxy_shared_secret = shi

14. Configure neutron in nova on the controller node

The settings below point at neutron's keystone authentication; 9696 is the neutron-server port.
Edit /etc/nova/nova.conf and complete the following.
In the [neutron] section, configure the access parameters, enable the metadata proxy, and set the shared secret:

url = http://192.168.5.107:9696

auth_url = http://192.168.5.107:35357

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = neutron

password = neutron

Then uncomment the following and configure it:

service_metadata_proxy = True

metadata_proxy_shared_secret = shi
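Since both files must carry the same secret, a quick consistency check:
grep '^metadata_proxy_shared_secret' /etc/neutron/metadata_agent.ini /etc/nova/nova.conf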

15. Create the plugin.ini symlink on the controller

The Networking service initialization scripts expect a symlink /etc/neutron/plugin.ini pointing to the ML2 plug-in configuration file /etc/neutron/plugins/ml2/ml2_conf.ini. If the symlink does not exist, create it with the following command:

[root@linux-node1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

16. Sync the database on the controller

[root@linux-node1 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

  OK


17. Restart the nova service and start the neutron services on the controller

Restart the nova-api service, on the controller node:

[root@linux-node1 ~]# systemctl restart openstack-nova-api.service

Start the following neutron services and enable them at boot (the matching start command is shown after the enable command):

[root@linux-node1 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
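The enable command above only registers the units; the corresponding start command (implied by the prose, and confirmed by port 9696 listening further below) would be:
[root@linux-node1 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service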

The official docs also mention the following; we do not need it here and do not run it (it was shown struck through in the original post).

For networking option 2, you would also enable the layer-3 service and set it to start at boot:

# systemctl enable neutron-l3-agent.service

# systemctl start neutron-l3-agent.service

Check the listening ports; port 9696 is now present:

[root@linux-node1 ~]# netstat -nltp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:3306            0.0.0.0:*               LISTEN      1874/mysqld         
tcp        0      0 0.0.0.0:11211           0.0.0.0:*               LISTEN      910/memcached       
tcp        0      0 0.0.0.0:9292            0.0.0.0:*               LISTEN      4019/python2        
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      912/httpd           
tcp        0      0 0.0.0.0:4369            0.0.0.0:*               LISTEN      1/systemd           
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      997/sshd            
tcp        0      0 0.0.0.0:15672           0.0.0.0:*               LISTEN      898/beam            
tcp        0      0 0.0.0.0:35357           0.0.0.0:*               LISTEN      912/httpd           
tcp        0      0 0.0.0.0:9696            0.0.0.0:*               LISTEN      6659/python2        
tcp        0      0 0.0.0.0:6080            0.0.0.0:*               LISTEN      4569/python2        
tcp        0      0 0.0.0.0:8774            0.0.0.0:*               LISTEN      6592/python2        
tcp        0      0 0.0.0.0:8775            0.0.0.0:*               LISTEN      6592/python2        
tcp        0      0 0.0.0.0:9191            0.0.0.0:*               LISTEN      4020/python2        
tcp        0      0 0.0.0.0:25672           0.0.0.0:*               LISTEN      898/beam            
tcp        0      0 0.0.0.0:5000            0.0.0.0:*               LISTEN      912/httpd           
tcp6       0      0 :::22                   :::*                    LISTEN      997/sshd            
tcp6       0      0 :::5672                 :::*                    LISTEN      898/beam 


18. Create the service entity and register endpoints on the controller

Create the service and register the endpoints in keystone.

Create the neutron service entity:

[root@linux-node1 ~]# source admin-openstack.sh 
[root@linux-node1 ~]# openstack service create --name neutron  --description "OpenStack Networking" network


Create the Networking service API endpoints

 

Create the public endpoint

[root@linux-node1 ~]# openstack endpoint create --region RegionOne network public http://192.168.5.107:9696



Create the internal endpoint

[root@linux-node1 ~]# openstack endpoint create --region RegionOne  network internal http://192.168.5.107:9696


Create the admin endpoint

[root@linux-node1 ~]# openstack endpoint create --region RegionOne  network admin http://192.168.5.107:9696



Check: seeing the agents listed below, with a smiley in the alive column on the right, means everything is normal.

[root@linux-node1 ~]# neutron agent-list

Install and configure neutron on the compute node

In early versions, nova-compute could connect to the database directly, which had a problem: if any compute node was compromised, the whole database was at risk. nova-conductor was later introduced to sit in the middle and broker that access.

1. Install the components
[root@linux-node2 ~]# yum install openstack-neutron-linuxbridge ebtables ipset -y


Two configuration files need to be changed on the compute node: the common components and the networking option.

Configure the common components
The common Networking components include the authentication mechanism, message queue, and plug-in.
/etc/neutron/neutron.conf

Configure the networking option
Configure the Linuxbridge agent
/etc/neutron/plugins/ml2/linuxbridge_agent.ini

The documentation can be consulted here:
https://docs.openstack.org/mitaka/zh_CN/install-guide-rdo/neutron-compute-install-option1.html

Because the compute node's neutron configuration is similar to the controller's, start from the controller's file and adjust it:

[root@linux-node1 ~]# scp -p /etc/neutron/neutron.conf 192.168.1.3:/etc/neutron/


2. Modify the configuration on the compute node

Delete the MySQL configuration, or comment out the line:

[database]
#connection =

Delete the following configuration under [nova]:
[nova]
auth_url = http://192.168.1.2:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova

Comment out the following 4 lines:
#notify_nova_on_port_status_changes = true
#notify_nova_on_port_data_changes = true
#core_plugin = ml2
#service_plugins =


3. Review the changed configuration

[root@linux-node2 neutron]# grep -n '^[a-Z]' /etc/neutron/neutron.conf
2:auth_strategy = keystone
515:rpc_backend = rabbit
767:auth_uri = http://192.168.1.2:5000
768:auth_url = http://192.168.1.2:35357
769:memcached_servers = 192.168.1.2:11211
770:auth_type = password
771:project_domain_name = default
772:user_domain_name = default
773:project_name = service
774:username = neutron
775:password = neutron
1042:lock_path = /var/lib/neutron/tmp
1061:rabbit_host = 192.168.1.2
1062:rabbit_userid = openstack
1063:rabbit_password = openstack
1216:rabbit_port = 5672


4. Modify the main nova configuration file on the compute node

Edit /etc/nova/nova.conf and complete the following.
In the [neutron] section, configure the access parameters:

[neutron]

...

url = http://192.168.1.2:9696

auth_url = http://192.168.1.2:35357

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = neutron

password = neutron

 

5. Configure the Linuxbridge agent on the compute node

(1) The Linuxbridge agent builds layer-2 virtual networking infrastructure for instances and handles security group rules.
Edit /etc/neutron/plugins/ml2/linuxbridge_agent.ini and complete the following.
In the [linux_bridge] section, map the public virtual network to the public physical network interface:

[linux_bridge]

physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
Replace PROVIDER_INTERFACE_NAME with the name of the underlying physical public interface.

(2) In the [vxlan] section, disable VXLAN overlay networks:

[vxlan]

enable_vxlan = False

(3) In the [securitygroup] section, enable security groups and configure the Linuxbridge iptables firewall driver:

[securitygroup]

...

enable_security_group = True

firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

Since the three settings above are identical to the controller's, simply copy the controller's file over to replace it:
[root@linux-node1 ~]# scp -r /etc/neutron/plugins/ml2/linuxbridge_agent.ini 192.168.1.3:/etc/neutron/plugins/ml2/


6. Check the linuxbridge_agent configuration file on the compute node

[root@linux-node2 neutron]# grep -n '^[a-Z]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
128:physical_interface_mappings = public:ens33
156:enable_security_group = true
157:firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
165:enable_vxlan = false

7. Restart the nova service and start the neutron service

Because the main nova configuration file was changed, the nova service must be restarted.

At the same time, start the neutron Linuxbridge agent and enable it at boot:

[root@linux-node2 neutron]# systemctl restart openstack-nova-compute.service
[root@linux-node2 neutron]# systemctl enable neutron-linuxbridge-agent.service
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service to /usr/lib/systemd/system/neutron-linuxbridge-agent.service.
[root@linux-node2 neutron]# systemctl start neutron-linuxbridge-agent.service


8. Check from the controller node

A Linux bridge agent for the compute node now appears:
[root@linux-node1 ~]# source admin-openstack.sh
[root@linux-node1 ~]# neutron agent-list
+--------------------------------+--------------------+---------------------+-------------------+-------+----------------+---------------------------+
| id                             | agent_type         | host                | availability_zone | alive | admin_state_up | binary                    |
+--------------------------------+--------------------+---------------------+-------------------+-------+----------------+---------------------------+
| 0ebb213b-4933-4a34-be61-2aeeb4 | DHCP agent         | linux-node1.shi.com | nova              | :-)   | True           | neutron-dhcp-agent        |
| 6574a6                         |                    |                     |                   |       |                |                           |
| 4677fa97-6569-4ab1-a3db-       | Linux bridge agent | linux-node1.shi.com |                   | :-)   | True           | neutron-linuxbridge-agent |
| 71d5736b40fb                   |                    |                     |                   |       |                |                           |
| 509da84b-                      | Linux bridge agent | linux-node2.shi.com |                   | :-)   | True           | neutron-linuxbridge-agent |
| 8bd3-4be0-9688-94d45225c3c0    |                    |                     |                   |       |                |                           |
| 5ec0f2c1-3dd3-40ba-            | Metadata agent     | linux-node1.shi.com |                   | :-)   | True           | neutron-metadata-agent    |
| a42e-e53313864087              |                    |                     |                   |       |                |                           |
+--------------------------------+--------------------+---------------------+-------------------+-------+----------------+---------------------------+



The mapping below can be thought of as giving the NIC an alias so its purpose is clear.
Your physical NIC name must match what the configuration file says (here ens33) -- in other words, the configuration file must reference the actual NIC name.
[root@linux-node2 ~]# grep physical_interface_mappings /etc/neutron/plugins/ml2/linuxbridge_agent.ini
physical_interface_mappings = public:ens33

 
 
 

7. OpenStack setup -- 7 -- Create a virtual machine

Create a single flat network and a subnet

 

 

1. Create a single flat network

On the controller node, load the admin credentials to gain access to admin-only commands:
source admin-openstack.sh. The provider network must be created as admin; if you source demo-openstack.sh instead, the creation will not succeed.

The command syntax is as follows.

In the command below, provider is changed to public, meaning the physical network is the mapped public one, which corresponds to the physical NIC.

neutron net-create --shared --provider:physical_network provider --provider:network_type flat provider

The actual run:

[root@linux-node1 ~]# source admin-openstack.sh 
[root@linux-node1 ~]# neutron net-create --shared --provider:physical_network public --provider:network_type flat public-net
Created a new network:

The tenant_id above matches the project id below, because we created the network as admin, so it belongs to the admin project.

[root@linux-node1 ~]# openstack project list
+----------------------------------+---------+
View the created network:

[root@linux-node1 ~]#  neutron net-list

2. Create a subnet
The run:

[root@linux-node1 ~]# neutron subnet-create --name public-subnet --allocation-pool start=192.168.1.100,end=192.168.1.200 --dns-nameserver 8.8.8.8 --gateway 192.168.1.1 public-net 192.168.1.0/24
Created a new subnet:


Run the command below again; the subnets column now has a value:

[root@linux-node1 ~]# neutron net-list

Create an instance with the nano flavor

1. Create the nano flavor

With the network created, create a nano-flavor host.

The default smallest flavor needs 512 MB of memory. For environments whose compute nodes have less than 4 GB of memory, we recommend creating an m1.nano flavor that needs only 64 MB.
For pure testing, use the m1.nano flavor to boot the CirrOS image.

Disk 1 GB, memory 64 MB, 1 vCPU:

[root@linux-node1 ~]# openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano

List the flavors.

IDs 1-5 are the defaults; 0 is the one just created:

[root@linux-node1 ~]# openstack flavor list

Most cloud images support public-key authentication rather than traditional password authentication. Before launching an instance, you must add a public key to the Compute service.

Generate a key pair and add the public key to OpenStack:

[root@linux-node1 ~]# source demo-openstack.sh 
[root@linux-node1 ~]# ssh-keygen -q -N ""
Enter file in which to save the key (/root/.ssh/id_rsa): 
[root@linux-node1 ~]# openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
Verify that the key pair was added:

[root@linux-node1 ~]# openstack keypair list
+-------+-------------------------------------------------+
2、增長安全組規則

默認狀況下,它有一個default安全組,這個安全組阻止了全部訪問,這裏添加icmp和ssh 

[root@linux-node1 ~]# openstack security group rule create --proto icmp default
 +-----------------------+--------------------------------------+
| Field                 | Value                                |
[root@linux-node1 ~]# openstack security group rule create --proto tcp --dst-port 22 default





3. List available images, networks, security groups, etc.

Before creating the instance, list the available flavors and images:

[root@linux-node1 ~]# source demo-openstack.sh 
[root@linux-node1 ~]# openstack flavor list
[root@linux-node1 ~]# openstack image list
+--------------------------------------+--------+--------+
List the available networks:

[root@linux-node1 ~]#  openstack network list

List the available security groups:

[root@linux-node1 ~]# openstack security group list
4. Create the instance

The syntax for creating an instance is:

openstack server create --flavor m1.tiny --image cirros \

  --nic net-id=PROVIDER_NET_ID --security-group default \

  --key-name mykey provider-instance

If you chose networking option 1 and your environment contains only one network, you can omit the --nic option because OpenStack automatically chooses the only available network.
net-id is the id shown by openstack network list, not the subnet id.

The run:

[root@linux-node1 ~]# openstack server create --flavor m1.nano --image cirros --nic net-id=dc3a90b3-b3ca-4c3d-8d7a-24587907659e --security-group default --key-name mykey provider-instance
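A variant that looks up the network id instead of pasting it by hand (assumes the network created above is named public-net):
NET_ID=$(openstack network list -f value -c ID -c Name | awk '$2 == "public-net" {print $1}')
openstack server create --flavor m1.nano --image cirros \
  --nic net-id=$NET_ID --security-group default \
  --key-name mykey provider-instance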

5. Check the instance status and log in to the instance

[root@linux-node1 ~]# openstack server list

On the compute node you can see the KVM guest is running:

[root@linux-node2 ~]#  virsh list
 Id    Name                           State
----------------------------------------------------
 1     instance-00000001              running

Logging in to the machine succeeds without a password, because the key was injected.

While the instance is being created you can watch the compute node's log, since the compute node is the one actually building the VM:

[root@linux-node2 ~]# tail -f /var/log/nova/nova-compute.log 
2017-08-06 14:13:19.589 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Final resource view: name=linux-node2.shi.com phys_ram=1023MB used_ram=576MB phys_disk=46GB used_disk=1GB total_vcpus=1 used_vcpus=1 pci_stats=[]
2017-08-06 14:13:19.841 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Compute_service record updated for linux-node2.shi.com:linux-node2.shi.com
2017-08-06 14:14:14.270 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Auditing locally available compute resources for node linux-node2.shi.com
2017-08-06 14:14:17.336 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Total usable vcpus: 1, total allocated vcpus: 1
2017-08-06 14:14:17.338 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Final resource view: name=linux-node2.shi.com phys_ram=1023MB used_ram=576MB phys_disk=46GB used_disk=1GB total_vcpus=1 used_vcpus=1 pci_stats=[]
2017-08-06 14:14:17.539 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Compute_service record updated for linux-node2.shi.com:linux-node2.shi.com
2017-08-06 14:15:18.450 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Auditing locally available compute resources for node linux-node2.shi.com
2017-08-06 14:15:19.397 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Total usable vcpus: 1, total allocated vcpus: 1
2017-08-06 14:15:19.397 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Final resource view: name=linux-node2.shi.com phys_ram=1023MB used_ram=576MB phys_disk=46GB used_disk=1GB total_vcpus=1 used_vcpus=1 pci_stats=[]
2017-08-06 14:15:19.445 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Compute_service record updated for linux-node2.shi.com:linux-node2.shi.com

If instance creation fails, check the logs of all services and look for suspicious entries around that time:

[root@linux-node1 ~]# grep 'ERROR' /var/log/glance/*
[root@linux-node1 ~]#grep 'ERROR' /var/log/keystone/*
[root@linux-node1 ~]# grep 'ERROR' /var/log/nova/*
[root@linux-node1 ~]#grep 'ERROR' /var/log/neutron/*
 

Use the demo user to view the created instance, since it was created as the demo user:

[root@linux-node1 ~]# openstack server list

[root@linux-node1 ~]# source demo-openstack.sh

[root@linux-node1 ~]# openstack server list
+--------------------------------------+-------------------+--------+--------------------------+
| ID                                   | Name              | Status | Networks                 |
+--------------------------------------+-------------------+--------+--------------------------+
| 3365c4b4-d487-4778-ad28-e2c675f085eb | provider-instance | ACTIVE | public-net=192.168.1.101 |
+--------------------------------------+-------------------+--------+--------------------------+

Get its console URL:
[root@linux-node1 ~]# openstack console url show provider-instance
+-------+----------------------------------------------------------------------------------+
| Field | Value                                                                            |
+-------+----------------------------------------------------------------------------------+
| type  | novnc                                                                            |
| url   | http://192.168.1.2:6080/vnc_auto.html?token=7f9daf00-54b3-4b9f-99eb-a3c30981de38 |
+-------+----------------------------------------------------------------------------------+

Copy that URL into a browser; it opens a VNC console as a web page. The token in the link has a limited lifetime and changes over time.

Port 6080 above is proxied through to port 5900 on 192.168.1.3.

The instance can be logged into from the browser page.



Check the listening ports on the compute node; port 5900 is the VNC port:

[root@linux-node2 ~]#  netstat -lntp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:5900            0.0.0.0:*               LISTEN      3456/qemu-kvm       
tcp        0      0 0.0.0.0:111             0.0.0.0:*               LISTEN      1/systemd           
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      1041/sshd           
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      1728/master         
tcp6       0      0 :::111                  :::*                    LISTEN      1/systemd           
tcp6       0      0 :::22                   :::*                    LISTEN      1041/sshd           
tcp6       0      0 ::1:25                  :::*                    LISTEN      1728/master  

Check the connections:

port 5900 on the compute node and port 6080 on the controller node.

[root@linux-node2 ~]# lsof -i:5900
COMMAND   PID USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
qemu-kvm 3456 qemu   21u  IPv4  54787      0t0  TCP *:rfb (LISTEN)
qemu-kvm 3456 qemu   24u  IPv4  56301      0t0  TCP linux-node2:rfb->linux-node1:53158 (ESTABLISHED)

[root@linux-node1 ~]# lsof -i:6080
COMMAND    PID USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
nova-novn 3509 nova    4u  IPv4  26745      0t0  TCP *:6080 (LISTEN)
nova-novn 9627 nova    4u  IPv4  26745      0t0  TCP *:6080 (LISTEN)
nova-novn 9627 nova    5u  IPv4  74962      0t0  TCP linux-node1:6080->promote.cache-dns.local:52711 (ESTABLISHED)
nova-novn 9629 nova    4u  IPv4  26745      0t0  TCP *:6080 (LISTEN)
nova-novn 9629 nova    5u  IPv4  74964      0t0  TCP linux-node1:6080->promote.cache-dns.local:52715 (ESTABLISHED)
nova-novn 9630 nova    4u  IPv4  26745      0t0  TCP *:6080 (LISTEN)
nova-novn 9630 nova    5u  IPv4  74965      0t0  TCP linux-node1:6080->promote.cache-dns.local:52716 (ESTABLISHED)
nova-novn 9631 nova    4u  IPv4  26745      0t0  TCP *:6080 (LISTEN)
nova-novn 9631 nova    5u  IPv4  74966      0t0  TCP linux-node1:6080->promote.cache-dns.local:52717 (ESTABLISHED)
nova-novn 9633 nova    4u  IPv4  26745      0t0  TCP *:6080 (LISTEN)
nova-novn 9633 nova    5u  IPv4  74969      0t0  TCP linux-node1:6080->promote.cache-dns.local:52721 (ESTABLISHED)
