1.安裝前更新系統
安裝好ubuntu 12.04 Server 64bits後,進入root模式下完成配置:
sudo su -
#apt-get install python-software-properties
#add-apt-repository cloud-archive:havana
升級系統:
apt-get update
apt-get upgrade
apt-get dist-upgrade
2.安裝更新ntp服務
apt-get install ntp
sed -i 's/server 0.ubuntu.pool.ntp.org/#server 0.ubuntu.pool.ntp.org/g' /etc/ntp.conf
sed -i 's/server 1.ubuntu.pool.ntp.org/#server 1.ubuntu.pool.ntp.org/g' /etc/ntp.conf
sed -i 's/server 2.ubuntu.pool.ntp.org/#server 2.ubuntu.pool.ntp.org/g' /etc/ntp.conf
sed -i 's/server 3.ubuntu.pool.ntp.org/#server 3.ubuntu.pool.ntp.org/g' /etc/ntp.conf
#Set the network node to follow up your controller node
sed -i 's/server ntp.ubuntu.com/server 10.10.10.2/g' /etc/ntp.conf
service ntp restart
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.122.3
netmask 255.255.255.0
gateway 192.168.122.1
dns-nameservers 192.168.122.1
auto eth0:1
iface eth0:1 inet static
address 10.10.10.3
netmask 255.255.255.0
auto eth0:2
iface eth0:2 inet static
address 10.20.20.3
netmask 255.255.255.0
net.ipv4.ip_forward=1
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
#運行下面命令,使生效
sysctl -p
3.安裝OpenVSwitch
apt-get install openvswitch-controller openvswitch-switch openvswitch-datapath-dkms openvswitch-datapath-source
module-assistant auto-install openvswitch-datapath
/etc/init.d/openvswitch-switch restart
#br-int will be used for VM integration
ovs-vsctl add-br br-int
#br-ex is used to make the VM accessible from the internet
ovs-vsctl add-br br-ex
ovs-vsctl add-port br-ex eth0
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
auto br-ex
iface br-ex inet static
address 192.168.122.3
netmask 255.255.255.0
gateway 192.168.122.1
dns-nameservers 192.168.122.1
#For Exposing OpenStack API over the internet
auto eth0
iface eth0 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ip link set $IFACE promisc off
down ifconfig $IFACE down
auto eth0:1
iface eth0:1 inet static
address 10.10.10.3
netmask 255.255.255.0
auto eth0:2
iface eth0:2 inet static
address 10.20.20.3
netmask 255.255.255.0
/etc/init.d/networking restart
eth0讓網橋br-ex接管以後,訪問外網就都br-ex處理了。不要忘了,咱們只有一塊網卡,接在同一個「交換機上」,因此你要注意一下eth0:1,eth0:2的route設置。
一切正常的話,輸入route命令的輸出應該以下:
Destination Gateway Genmask Flags Metric Ref Use Iface
default 192.168.122.1 0.0.0.0 UG 100 0 0 br-ex
10.10.10.0 * 255.255.255.0 U 0 0 0 br-ex
10.20.20.0 * 255.255.255.0 U 0 0 0 br-ex
192.168.122.0 * 255.255.255.0 U 0 0 0 br-ex
或者對應的ip route show 的輸出爲:
root@Network:~# ip route show
default via 192.168.122.1 dev br-ex metric 100
10.10.10.0/24 dev br-ex proto kernel scope link src 10.10.10.3
10.20.20.0/24 dev br-ex proto kernel scope link src 10.20.20.3
192.168.122.0/24 dev br-ex proto kernel scope link src 192.168.122.3
沒錯,10.10.10.0/24 與10.20.20.0/24指定的路由設備都是br-ex,不然你ping 控制節點(10.10.10.2),是ping不通的。若是這二者指定的iface仍是eth0,你應該按照以下處理:
route del -net 10.10.10.0/24 dev eth0
route del -net 10.20.20.0/24 dev eth0
ip route add 10.10.10.0/24 proto kernel scope link src 10.10.10.3 dev br-ex
ip route add 10.20.20.0/24 proto kernel scope link src 10.20.20.3 dev br-ex
爲了每次重啓主機以後,也能按照上面的網卡設置,你能夠將上述內容加入到/etc/rc.local的腳本。固然,若是是在物理機上的單網卡,設置別名設備的時候,能夠直接設爲br-ex:1,br-ex:2,應該就沒什麼問題。可是若是在KVM的虛擬機上,即便用br-ex設置別名,你也要用ip route 設置爲proto kernel scope link的屬性。
root@network:~# ovs-vsctl list-br
br-ex
br-int
root@network:~# ovs-vsctl show
    Bridge br-int
        Port br-int
            Interface br-int
                type: internal
    Bridge br-ex
        Port "eth0"
            Interface "eth0"
        Port br-ex
            Interface br-ex
                type: internal
    ovs_version: "1.4.0+build0"
4.Neutron-*
apt-get install neutron-plugin-openvswitch-agent neutron-dhcp-agent neutron-l3-agent neutron-metadata-agent
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 10.10.10.2
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = neutron
admin_password = admin
[OVS]
tenant_network_type = gre
enable_tunneling = True
tunnel_id_ranges = 1:1000
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip = 10.20.20.3
#Firewall driver for realizing neutron security group function
[SECURITYGROUP]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
auth_url = http://10.10.10.2:35357/v2.0
auth_region = RegionOne
admin_tenant_name = service
admin_user = neutron
admin_password = admin
# IP address used by Nova metadata server
nova_metadata_ip = 10.10.10.2
# TCP Port used by Nova metadata server
nova_metadata_port = 8775
metadata_proxy_shared_secret = helloOpenStack
rabbit_host = 10.10.10.2
[keystone_authtoken]
auth_host = 10.10.10.2
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = neutron
admin_password = admin
signing_dir = /var/lib/quantum/keystone-signing
[database]
connection = mysql://neutronUser:neutronPass@10.10.10.2/neutron
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
use_namespaces = True
external_network_bridge = br-ex
signing_dir = /var/cache/neutron
admin_tenant_name = service
admin_user = neutron
admin_password = admin
auth_url = http://10.10.10.2:35357/v2.0
l3_agent_manager = neutron.agent.l3_agent.L3NATAgentWithStateReport
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
use_namespaces = True
signing_dir = /var/cache/neutron
admin_tenant_name = service
admin_user = neutron
admin_password = admin
auth_url = http://10.10.10.2:35357/v2.0
dhcp_agent_manager = neutron.agent.dhcp_agent.DhcpAgentWithStateReport
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
state_path = /var/lib/neutron
cd /etc/init.d/; for i in $( ls neutron-* ); do service $i restart; done
網絡節點的服務部署完畢,下面就是計算節點的安裝了