首页 > 代码库 > 私有云 openstack部署

私有云 openstack部署

                               
控制节点 计算节点 两台机器
环境准备
centos7.1
控制节点
外网卡Linux-node0.openstack 192.168.31.151
内网卡Linux-node0.openstack 192.168.1.17
计算节点
外网卡Linux-node2.openstack 192.168.31.219
内网卡Linux-node2.openstack 192.168.1.8
关闭防火墙 firewalld
关闭selinux
/etc/hosts  #主机名一开始设置好,后面就不能更改了,否则就会出问题!这里设置好ip与主机名的对应关系
192.168.1.17  linux-node0.openstack
192.168.1.8   linux-node2.openstack  

#Base 安装源文件
yum install -y http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-8.noarch.rpm
yum install -y centos-release-openstack-liberty
yum install -y python-openstackclient
##MySQL 
yum install -y mariadb mariadb-server MySQL-python
##RabbitMQ
yum install -y rabbitmq-server
##Keystone
yum install -y openstack-keystone httpd mod_wsgi memcached python-memcached
##Glance
yum install -y openstack-glance python-glance python-glanceclient
##Nova
yum install -y openstack-nova-api openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler python-novaclient
##Neutron linux-node1.example.com
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge python-neutronclient ebtables ipset
##Dashboard
yum install -y openstack-dashboard
##Cinder
yum install -y openstack-cinder python-cinderclient
*************************************************************************************

##Base
yum install -y http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-8.noarch.rpm
yum install -y centos-release-openstack-liberty
yum install -y python-openstackclient
##Nova linux-node2.openstack
yum install -y openstack-nova-compute sysfsutils
##Neutron linux-node2.openstack
yum install -y openstack-neutron openstack-neutron-linuxbridge ebtables ipset
##Cinder
yum install -y openstack-cinder python-cinderclient targetcli python-oslo-policy
*************************************************************************************

设置时间同步、 关闭 selinux 和 iptables
在 linux-node0 上配置( 只有 centos7 能用, 6 还用 ntp)
[root@linux-node0 ~]# yum install -y chrony
vim /etc/chrony.conf
allow 192.168/16 #允许哪些服务器和自己同步时间
[root@linux-node0 ~]# 
systemctl enable chronyd.service    #开机启动
systemctl start chronyd.service
timedatectl set-timezone Asia/Shanghai     #设置时区
timedatectl status
在 linux-node2 上配置
[root@linux-node2 ~]# 
yum install -y chrony
vim /etc/chrony.conf
server 192.168.1.17 iburst #只留一行
[root@linux-node2 ~]# 
systemctl enable chronyd.service
systemctl start chronyd.service
timedatectl set-timezone Asia/Shanghai
chronyc sources

[root@linux-node0 ~]# 
cp /usr/share/mysql/my-medium.cnf /etc/my.cnf  或 /usr/share/mariadb/my-medium.cnf
[mysqld]
default-storage-engine = innodb
innodb_file_per_table
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
[root@linux-node0 ~]# 
systemctl enable mariadb.service                                             
mysql_install_db --datadir="/var/lib/mysql" --user="mysql"  #初始化数据库
systemctl start mariadb.service
mysql_secure_installation                                                     #设置密码及初始化
密码 123456,一路 y 回车
CREATE DATABASE keystone; 
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'keystone';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone';

CREATE DATABASE glance;   
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';

CREATE DATABASE nova;    
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';

CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';

CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder';
flush privileges;  #更新权限表使授权生效


[root@control-node0 ~]# systemctl enable rabbitmq-server.service
[root@control-node0 ~]# systemctl start rabbitmq-server.service
创建openstack的用户名和密码
[root@control-node0 ~]# rabbitmqctl add_user openstack openstack
Creating user "openstack" ...
...done.
用户授权
[root@control-node0 ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
Setting permissions for user "openstack" in vhost "/" ...
...done.
列出rabbitmq的插件
[root@control-node0 ~]# rabbitmq-plugins list
[ ] amqp_client                       3.3.5
[ ] cowboy                            0.5.0-rmq3.3.5-git4b93c2d
[ ] eldap                             3.3.5-gite309de4
[ ] mochiweb                          2.7.0-rmq3.3.5-git680dba8
[ ] rabbitmq_amqp1_0                  3.3.5
[ ] rabbitmq_auth_backend_ldap        3.3.5
[ ] rabbitmq_auth_mechanism_ssl       3.3.5
[ ] rabbitmq_consistent_hash_exchange 3.3.5
[ ] rabbitmq_federation               3.3.5
[ ] rabbitmq_federation_management    3.3.5
[ ] rabbitmq_management               3.3.5
[ ] rabbitmq_management_agent         3.3.5
[ ] rabbitmq_management_visualiser    3.3.5
[ ] rabbitmq_mqtt                     3.3.5
[ ] rabbitmq_shovel                   3.3.5
[ ] rabbitmq_shovel_management        3.3.5
[ ] rabbitmq_stomp                    3.3.5
[ ] rabbitmq_test                     3.3.5
[ ] rabbitmq_tracing                  3.3.5
[ ] rabbitmq_web_dispatch             3.3.5
[ ] rabbitmq_web_stomp                3.3.5
[ ] rabbitmq_web_stomp_examples       3.3.5
[ ] sockjs                            0.3.4-rmq3.3.5-git3132eb9
[ ] webmachine                        1.10.3-rmq3.3.5-gite9359c7
rabbitmq管理插件启动
[root@control-node0 ~]# rabbitmq-plugins enable rabbitmq_management 
重新启动rabbitmq
[root@control-node0 ~]# systemctl restart rabbitmq-server.service
再次查看监听的端口:web管理端口:15672
lsof -i:15672 查看进程
[root@control-node0 ~]# netstat -lntup
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:25672           0.0.0.0:*               LISTEN      38649/beam          
tcp        0      0 0.0.0.0:3306            0.0.0.0:*               LISTEN      38154/mysqld        

打开http://192.168.31.151:15672  用户名 guest      密码 guest 
登录进去之后:
Admin------->复制administrator------->点击openstack------>Update this user-------->
Tags:粘帖administrator--------->密码都设置为openstack-------->logout
然后在登陆:用户名 openstack  密码  openstack

[root@control-node0 ~]# openssl rand -hex 10
 8097f01ca96d056655cf 产生的随机数
[root@control-node0 ~]# grep -n '^[a-z]'  /etc/keystone/keystone.conf
12:admin_token =  8097f01ca96d056655cf
107:verbose = true
495:connection = mysql://keystone:keystone@192.168.1.17/keystone
1313:servers =  192.168.1.17:11211
1349:driver = sql
1911:provider = uuid
1916:driver = memcache
同步数据库:注意权限,所以要用su -s 切换到keystone用户下执行:
[root@control-node0 ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
No handlers could be found for logger "oslo_config.cfg"
验证数据是否创建成功
[root@control-node0 ~]# mysql -ukeystone -pkeystone
MariaDB [(none)]> use keystone
Database changed
MariaDB [keystone]> show tables;

[root@control-node0 ~]# systemctl enable memcached
[root@control-node0 ~]# systemctl start memcached.service
必须要配置httpd的ServerName,否则keystone服务不能起来
[root@control-node0 ~]# vi /etc/httpd/conf/httpd.conf
ServerName 192.168.1.17:80
[root@control-node0 ~]# grep -n '^ServerName' /etc/httpd/conf/httpd.conf      
95:ServerName 192.168.1.17:80
新建keystone配置文件,并用apache来代理它:5000  正常的api来访问  35357  管理访问的端口
[root@control-node0 ~]# vim /etc/httpd/conf.d/wsgi-keystone.conf
Listen 5000
Listen 35357
<VirtualHost *:5000>
    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-public
    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    <IfVersion >= 2.4>
      ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined
    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
<VirtualHost *:35357>
    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-admin
    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    <IfVersion >= 2.4>
      ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined
    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
启动memcache与httpd服务
[root@control-node0 ~]# systemctl enable httpd
[root@control-node0 ~]# systemctl start httpd
查看端口
[root@control-node0 ~]# netstat -lntup|grep httpd
tcp6       0      0 :::5000                 :::*                    LISTEN      39324/httpd         
tcp6       0      0 :::80                   :::*                    LISTEN      39324/httpd         
tcp6       0      0 :::35357                :::*                    LISTEN      39324/httpd       
创建验证用户及地址版本信息
[root@control-node0 ~]# grep -n '^admin_token' /etc/keystone/keystone.conf
12:admin_token = 8097f01ca96d056655cf
[root@control-node0 ~]# export OS_TOKEN=8097f01ca96d056655cf
[root@control-node0 ~]# export OS_URL=http://192.168.1.17:35357/v3
[root@control-node0 ~]# export OS_IDENTITY_API_VERSION=3
[root@control-node0 ~]# env

创建 admin 项目---创建 admin 用户(密码 admin,生产不要这么玩) ---创建 admin 角色---把 admin 用户加入到 admin 项目赋予 admin 的角色(三个 admin 的位置:项目,用户,角色)
创建租户用户
[root@control-node0 ~]# openstack project create --domain default --description "Admin Project" admin
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Admin Project                    |
| domain_id   | default                          |
| enabled     | True                             |
| id          | b5a578cfdb4848dba2b91dd38d1e2b93 |
| is_domain   | False                            |
| name        | admin                            |
| parent_id   | None                             |
+-------------+----------------------------------+
创建admin的用户
[root@control-node0 ~]# openstack user create --domain default --password-prompt admin
User Password:admin
Repeat User Password:admin
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | default                          |
| enabled   | True                             |
| id        | ad4f6c3d88a047d6802a05735a03ba8f |
| name      | admin                            |
+-----------+----------------------------------+
创建admin的角色
[root@control-node0 ~]# openstack role create admin
+-------+----------------------------------+
| Field | Value                            |
+-------+----------------------------------+
| id    | 0b546d54ed7f467fa90f18bb899452d3 |
| name  | admin                            |
+-------+----------------------------------+
把admin用户加入到admin项目,并赋予admin的角色
[root@control-node0 ~]# openstack role add --project admin --user admin admin
创建普通用户密码及角色
[root@control-node0 ~]# openstack project create --domain default --description "Demo Project" demo
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Demo Project                     |
| domain_id   | default                          |
| enabled     | True                             |
| id          | 5f4aaeb328f049ddbfe2717ded103c67 |
| is_domain   | False                            |
| name        | demo                             |
| parent_id   | None                             |
+-------------+----------------------------------+
[root@control-node0 ~]# openstack user create --domain default --password=demo demo
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | default                          |
| enabled   | True                             |
| id        | 46dc3686bc0a4ea6b8d09505603ccecc |
| name      | demo                             |
+-----------+----------------------------------+
[root@control-node0 ~]# openstack role create user
+-------+----------------------------------+
| Field | Value                            |
+-------+----------------------------------+
| id    | 314a22500bf042ba9a970701e2c39998 |
| name  | user                             |
+-------+----------------------------------+
[root@control-node0 ~]# openstack role add --project demo --user demo user
创建一个Service的项目 用来管理其他服务用
[root@control-node0 ~]# 
openstack project create --domain default --description "Service Project" service
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Service Project                  |
| domain_id   | default                          |
| enabled     | True                             |
| id          | de068df7bbad42379c0c6050fa306fbb |
| is_domain   | False                            |
| name        | service                          |
| parent_id   | None                             |
+-------------+----------------------------------+
查看创建的用户及角色
[root@control-node0 ~]# openstack user list
+----------------------------------+-------+
| ID                               | Name  |
+----------------------------------+-------+
| 46dc3686bc0a4ea6b8d09505603ccecc | demo  |
| ad4f6c3d88a047d6802a05735a03ba8f | admin |
+----------------------------------+-------+
[root@control-node0 ~]#  openstack role list
+----------------------------------+-------+
| ID                               | Name  |
+----------------------------------+-------+
| 0b546d54ed7f467fa90f18bb899452d3 | admin |
| 314a22500bf042ba9a970701e2c39998 | user  |
+----------------------------------+-------+
[root@control-node0 ~]# openstack project list
+----------------------------------+---------+
| ID                               | Name    |
+----------------------------------+---------+
| 5f4aaeb328f049ddbfe2717ded103c67 | demo    |
| b5a578cfdb4848dba2b91dd38d1e2b93 | admin   |
| de068df7bbad42379c0c6050fa306fbb | service |
+----------------------------------+---------+
keystone本身也需要注册
[root@control-node0 ~]# 
openstack service create --name keystone --description "OpenStack Identity" identity
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Identity               |
| enabled     | True                             |
| id          | d632e3036b974943978631b9cabcafe0 |
| name        | keystone                         |
| type        | identity                         |
+-------------+----------------------------------+
公共的api接口
[root@control-node0 ~]# openstack endpoint create --region RegionOne identity public http://192.168.1.17:5000/v2.0
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 1a8eb7b97ff64c56886942a38054b9bb |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | d632e3036b974943978631b9cabcafe0 |
| service_name | keystone                         |
| service_type | identity                         |
| url          | http://192.168.1.17:5000/v2.0       |
+--------------+----------------------------------+
私有的api接口
[root@control-node0 ~]# 
openstack endpoint create --region RegionOne identity internal http://192.168.1.17:5000/v2.0
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 4caf182c26dd457ba86d9974dfb00c1b |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | d632e3036b974943978631b9cabcafe0 |
| service_name | keystone                         |
| service_type | identity                         |
| url          | http://192.168.1.17:5000/v2.0       |
+--------------+----------------------------------+
管理的api接口
[root@control-node0 ~]# 
openstack endpoint create --region RegionOne identity admin http://192.168.1.17:35357/v2.0
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 34c8185306c340a0bb4efbfc9da21003 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | d632e3036b974943978631b9cabcafe0 |
| service_name | keystone                         |
| service_type | identity                         |
| url          | http://192.168.1.17:35357/v2.0      |
+--------------+----------------------------------+
查看api接口
[root@control-node0 ~]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+-
| ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                         |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-
| 1a8eb7b97ff64c56886942a38054b9bb | RegionOne | keystone     | identity     | True    | public    | http://192.168.1.17:5000/v2.0  |
| 34c8185306c340a0bb4efbfc9da21003 | RegionOne | keystone     | identity     | True    | admin     | http://192.168.1.17:35357/v2.0 |
| 4caf182c26dd457ba86d9974dfb00c1b | RegionOne | keystone     | identity     | True    | internal  | http://192.168.1.17:5000/v2.0  |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-
删除  openstack endpoint delete ID号 
使用用户名密码的方式登录:必须要先取消环境变量
[root@control-node0 ~]# unset OS_TOKEN
[root@control-node0 ~]# unset OS_URL
[root@control-node0 ~]# openstack --os-auth-url http://192.168.1.17:35357/v3 --os-project-domain-id default --os-user-domain-id default --os-project-name admin --os-username admin --os-auth-type password token issue
Password: 
+------------+----------------------------------+
| Field      | Value                            |
+------------+----------------------------------+
| expires    | 2016-05-27T05:25:30.193235Z      |
| id         | 4e8c0c1e0f20481d959c977db7f689b6 |
| project_id | b5a578cfdb4848dba2b91dd38d1e2b93 |
| user_id    | ad4f6c3d88a047d6802a05735a03ba8f |
+------------+----------------------------------+
密码 admin

为了方便快捷地使用 keystone,我们需要设置两个环境变量文件:
[root@control-node0 ~]# cat admin-openrc.sh 
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=admin
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://192.168.1.17:35357/v3
export OS_IDENTITY_API_VERSION=3
[root@control-node0 ~]# cat demo-openrc.sh 
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=demo
export OS_TENANT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=demo
export OS_AUTH_URL=http://192.168.1.17:5000/v3
export OS_IDENTITY_API_VERSION=3
添加执行权限
[root@control-node0 ~]# chmod +x admin-openrc.sh demo-openrc.sh 
测试获取TOKEN
[root@control-node0 ~]# source admin-openrc.sh 
[root@control-node0 ~]# openstack token issue
+------------+----------------------------------+
| Field      | Value                            |
+------------+----------------------------------+
| expires    | 2016-05-27T05:30:03.600977Z      |
| id         | 409443b07f5948f2a437443090927621 |
| project_id | b5a578cfdb4848dba2b91dd38d1e2b93 |
| user_id    | ad4f6c3d88a047d6802a05735a03ba8f |
+------------+----------------------------------+

修改配置文件添加数据库连接glance-api.conf与glance-registry.conf
[root@control-node0 ~]# vim /etc/glance/glance-api.conf 
[root@control-node0 ~]# vim /etc/glance/glance-registry.conf 
[root@control-node0 ~]# grep -n '^connection' /etc/glance/glance-api.conf
538:connection=mysql://glance:glance@192.168.1.17/glance
[root@control-node0 ~]# grep -n '^connection' /etc/glance/glance-registry.conf 
363:connection=mysql://glance:glance@192.168.1.17/glance
同步数据库
[root@control-node0 ~]# su -s /bin/sh -c "glance-manage db_sync" glance
No handlers could be found for logger "oslo_config.cfg"
查看数据库同步是否成功
[root@control-node0 ~]#  mysql -uglance -pglance -h 192.168.1.17
MariaDB [(none)]> use glance;
Database changed
MariaDB [glance]> show tables

创建glance用户
[root@control-node0 ~]# source admin-openrc.sh 
[root@control-node0 ~]# openstack user create --domain default --password=glance glance
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | default                          |
| enabled   | True                             |
| id        | 9009c0e0431646d193744d445411a0ab |
| name      | glance                           |
+-----------+----------------------------------+
将此用户加入到项目里面并给它赋予admin的权限
[root@control-node0 ~]# openstack role add --project service --user glance admin

[root@control-node0 ~]# vim  /etc/glance/glance-api.conf 
[root@control-node0 ~]# grep -n ^[a-z]  /etc/glance/glance-api.conf 
363:verbose=True
491:notification_driver = noop
538:connection=mysql://glance:glance@192.168.1.17/glance
642:default_store=file
701:filesystem_store_datadir=/var/lib/glance/images/
974:auth_uri = http://192.168.1.17:5000
975:auth_url = http://192.168.1.17:35357
976:auth_plugin = password
977:project_domain_id = default
978:user_domain_id = default
979:project_name = service
980:username = glance
981:password = glance
1484:flavor= keystone

[root@control-node0 ~]# grep -n '^[a-z]' /etc/glance/glance-registry.conf 
363:connection=mysql://glance:glance@192.168.1.17/glance
767:auth_uri = http://192.168.1.17:5000
768:auth_url = http://192.168.1.17:35357
769:auth_plugin = password
770:project_domain_id = default
771:user_domain_id = default
772:project_name = service
773:username = glance
774:password = glance
1256:flavor=keystone
启动glance服务并设置开机启动
[root@control-node0 ~]# systemctl enable openstack-glance-api
[root@control-node0 ~]# systemctl enable openstack-glance-registry
[root@control-node0 ~]# systemctl start openstack-glance-api
[root@control-node0 ~]# systemctl start openstack-glance-registry
监听端口: registry:9191     api:9292
[root@control-node0 ~]# netstat -antup
  

[root@control-node0 ~]# source admin-openrc.sh 
[root@control-node0 ~]# 
openstack service create --name glance --description "OpenStack Image service" image
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Image service          |
| enabled     | True                             |
| id          | 5ab719816a7f4294a7f843950fcd2e59 |
| name        | glance                           |
| type        | image                            |
+-------------+----------------------------------+
openstack endpoint create --region RegionOne   image public http://192.168.1.17:9292
openstack endpoint create --region RegionOne   image internal http://192.168.1.17:9292
openstack endpoint create --region RegionOne   image admin http://192.168.1.17:9292
[root@control-node0 ~]# 
openstack endpoint create --region RegionOne   image public http://192.168.1.17:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | a181ddd3ee8b4d72be1a0fda87b542ef |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 5ab719816a7f4294a7f843950fcd2e59 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://192.168.1.17:9292            |
+--------------+----------------------------------+
[root@control-node0 ~]# openstack endpoint create --region RegionOne   image internal http://192.168.1.17:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 4df72061901c40efa3905e95674fc5bc |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 5ab719816a7f4294a7f843950fcd2e59 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://192.168.1.17:9292            |
+--------------+----------------------------------+
[root@control-node0 ~]# openstack endpoint create --region RegionOne   image admin http://192.168.1.17:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | f755b7c22ab04ea3857840086b7c7754 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 5ab719816a7f4294a7f843950fcd2e59 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://192.168.1.17:9292            |
+--------------+----------------------------------+

环境变量添加export OS_IMAGE_API_VERSION=2
[root@control-node0 ~]# cat admin-openrc.sh 
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=admin
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://192.168.1.17:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
[root@control-node0 ~]# cat demo-openrc.sh 
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=demo
export OS_TENANT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=demo
export OS_AUTH_URL=http://192.168.1.17:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
[root@control-node0 ~]# glance image-list

上传镜像
[root@control-node0 ~]# glance image-create --name "cirros" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare  --visibility public --progress
[=============================>] 100%
+------------------+--------------------------------------+
| Property         | Value                                |
+------------------+--------------------------------------+
| checksum         | ee1eca47dc88f4879d8a229cc70a07c6     |
| container_format | bare                                 |
| created_at       | 2016-05-27T05:09:36Z                 |
| disk_format      | qcow2                                |
| id               | 07245ea1-5f76-453d-a320-f1b08433a10a |
| min_disk         | 0                                    |
| min_ram          | 0                                    |
| name             | cirros                               |
| owner            | b5a578cfdb4848dba2b91dd38d1e2b93     |
| protected        | False                                |
| size             | 13287936                             |
| status           | active                               |
| tags             | []                                   |
| updated_at       | 2016-05-27T05:09:36Z                 |
| virtual_size     | None                                 |
| visibility       | public                               |
+------------------+--------------------------------------+
查看镜像
[root@control-node0 ~]# glance image-list
+--------------------------------------+--------+
| ID                                   | Name   |
+--------------------------------------+--------+
| 07245ea1-5f76-453d-a320-f1b08433a10a | cirros |
+--------------------------------------+--------+


配置nova.conf文件
1)、配置nova连接及数据表的创建
[root@control-node0 ~]# grep -n ^[a-z] /etc/nova/nova.conf 
1740:connection=mysql://nova:nova@192.168.1.17/nova
同步数据库
[root@control-node0 ~]# su -s /bin/sh -c "nova-manage db sync" nova
检查数据库
[root@control-node0 ~]# mysql -unova -pnova -h 192.168.1.17
MariaDB [(none)]> use nova
Database changed
MariaDB [nova]> show tables;

2)、Keystone配置
[root@control-node0 ~]# vim /etc/nova/nova.conf 
[root@control-node0 ~]# grep -n ^[a-z] /etc/nova/nova.conf 
1420:rpc_backend=rabbit
1740:connection=mysql://nova:nova@192.168.1.17/nova
2922:rabbit_host=192.168.1.17
2926:rabbit_port=5672
2938:rabbit_userid=openstack
2942:rabbit_password=openstack
[root@control-node0 ~]# source admin-openrc.sh 
[root@control-node0 ~]# openstack user create --domain default --password=nova nova
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | default                          |
| enabled   | True                             |
| id        | 6b4986f51d7749fd8dc9668d92e21e01 |
| name      | nova                             |
+-----------+----------------------------------+
[root@control-node0 ~]# openstack role add --project service --user nova admin
[root@control-node0 nova]# grep -n ^[a-z] nova.conf 
61:rpc_backend=rabbit
124:my_ip=192.168.1.17
268:enabled_apis=osapi_compute,metadata
425:auth_strategy=keystone
1053:network_api_class=nova.network.neutronv2.api.API
1171:linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
1331:security_group_api=neutron
1760:firewall_driver = nova.virt.firewall.NoopFirewallDriver
1828:vncserver_listen=$my_ip
1832:vncserver_proxyclient_address=$my_ip
2213:connection=mysql://nova:nova@192.168.1.17/nova
2334:host=$my_ip
2542:auth_uri = http://192.168.1.17:5000
2543:auth_url = http://192.168.1.17:35357
2544:auth_plugin = password
2545:project_domain_id = default
2546:user_domain_id = default
2547:project_name = service
2548:username = nova
2549:password = nova
3033:url = http://192.168.1.17:9696
3034:auth_url = http://192.168.1.17:35357
3035:auth_plugin = password
3036:project_domain_id = default
3037:user_domain_id = default
3038:region_name = RegionOne
3039:project_name = service
3040:username = neutron
3041:password = neutron
3049:service_metadata_proxy=true
3053:metadata_proxy_shared_secret=neutron
3804:lock_path=/var/lib/nova/tmp
3967:rabbit_host=192.168.1.17
3971:rabbit_port=5672
3983:rabbit_userid=openstack
3987:rabbit_password=openstack
设置开机自启动
systemctl enable openstack-nova-api.service 
openstack-nova-cert.service openstack-nova-consoleauth.service 
openstack-nova-scheduler.service openstack-nova-conductor.service 
openstack-nova-novncproxy.service
启动全部服务
[root@linux-node1 ~]# systemctl start openstack-nova-api.service \
  openstack-nova-cert.service openstack-nova-consoleauth.service \
  openstack-nova-scheduler.service openstack-nova-conductor.service \
  openstack-nova-novncproxy.service
注册服务
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://192.168.1.17:8774/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne compute internal http://192.168.1.17:8774/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne compute admin http://192.168.1.17:8774/v2/%\(tenant_id\)s
[root@control-node0 ~]# source admin-openrc.sh 
[root@control-node0 ~]# openstack service create --name nova --description "OpenStack Compute" compute
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Compute                |
| enabled     | True                             |
| id          | 47c979dc1312436fb912b8e8b842f293 |
| name        | nova                             |
| type        | compute                          |
+-------------+----------------------------------+
[root@control-node0 ~]# openstack endpoint create --region RegionOne compute public http://192.168.1.17:8774/v2/%\(tenant_id\)s
+--------------+----------------------------------------+
| Field        | Value                                  |
+--------------+----------------------------------------+
| enabled      | True                                   |
| id           | b42b8696b4e84d0581228f8fef746ce2       |
| interface    | public                                 |
| region       | RegionOne                              |
| region_id    | RegionOne                              |
| service_id   | 47c979dc1312436fb912b8e8b842f293       |
| service_name | nova                                   |
| service_type | compute                                |
| url          | http://192.168.1.17:8774/v2/%(tenant_id)s |
+--------------+----------------------------------------+
[root@control-node0 ~]# openstack endpoint create --region RegionOne compute internal http://192.168.1.17:8774/v2/%\(tenant_id\)s
+--------------+----------------------------------------+
| Field        | Value                                  |
+--------------+----------------------------------------+
| enabled      | True                                   |
| id           | b54df18a4c23471399858df476a98d5f       |
| interface    | internal                               |
| region       | RegionOne                              |
| region_id    | RegionOne                              |
| service_id   | 47c979dc1312436fb912b8e8b842f293       |
| service_name | nova                                   |
| service_type | compute                                |
| url          | http://192.168.1.17:8774/v2/%(tenant_id)s |
+--------------+----------------------------------------+
[root@control-node0 ~]# openstack endpoint create --region RegionOne compute admin http://192.168.1.17:8774/v2/%\(tenant_id\)s
+--------------+----------------------------------------+
| Field        | Value                                  |
+--------------+----------------------------------------+
| enabled      | True                                   |
| id           | 71daf94628384f1e8315060f86542696       |
| interface    | admin                                  |
| region       | RegionOne                              |
| region_id    | RegionOne                              |
| service_id   | 47c979dc1312436fb912b8e8b842f293       |
| service_name | nova                                   |
| service_type | compute                                |
| url          | http://192.168.1.17:8774/v2/%(tenant_id)s |
+--------------+----------------------------------------+
验证是否成功:
[root@control-node0 ~]# openstack host list
+-------------------------+-------------+----------+
| Host Name               | Service     | Zone     |
+-------------------------+-------------+----------+
| control-node0.xiegh.com | conductor   | internal |
| control-node0.xiegh.com | consoleauth | internal |
| control-node0.xiegh.com | scheduler   | internal |
| control-node0.xiegh.com | cert        | internal |
+-------------------------+-------------+----------+
如果出现此四个服务则代表nova创建成功了

nova-compute一般运行在计算节点上,通过message queue接收并管理VM的生命周期
nova-compute通过libvirt管理KVM,通过XenAPI管理Xen
[root@compute-node1 ~]# grep -n '^[a-z]' /etc/nova/nova.conf 
61:rpc_backend=rabbit
124:my_ip=10.0.0.81
268:enabled_apis=osapi_compute,metadata
425:auth_strategy=keystone
1053:network_api_class=nova.network.neutronv2.api.API
1171:linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
1331:security_group_api=neutron
1760:firewall_driver = nova.virt.firewall.NoopFirewallDriver
1820:novncproxy_base_url=http://192.168.1.17:6080/vnc_auto.html
1828:vncserver_listen=0.0.0.0
1832:vncserver_proxyclient_address=10.0.0.81
1835:vnc_enabled=true
1838:vnc_keymap=en-us
2213:connection=mysql://nova:nova@192.168.1.17/nova
2334:host=192.168.1.17
2542:auth_uri = http://192.168.1.17:5000
2543:auth_url = http://192.168.1.17:35357
2544:auth_plugin = password
2545:project_domain_id = default
2546:user_domain_id = default
2547:project_name = service
2548:username = nova
2549:password = nova
2727:virt_type=kvm
3033:url = http://192.168.1.17:9696
3034:auth_url = http://192.168.1.17:35357
3035:auth_plugin = password
3036:project_domain_id = default
3037:user_domain_id = default
3038:region_name = RegionOne
3039:project_name = service
3040:username = neutron
3041:password = neutron
3804:lock_path=/var/lib/nova/tmp
3967:rabbit_host=192.168.1.17
3971:rabbit_port=5672
3983:rabbit_userid=openstack
3987:rabbit_password=openstack

[root@compute-node1 ~]#  systemctl enable libvirtd openstack-nova-compute
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-compute.service to /usr/lib/systemd/system/openstack-nova-compute.service
[root@compute-node1 ~]# systemctl start libvirtd openstack-nova-compute
在控制节点上面查看注册状态
[root@control-node0 ~]# openstack host list
+-------------------------+-------------+----------+
| Host Name               | Service     | Zone     |
+-------------------------+-------------+----------+
| control-node0.xiegh.com | conductor   | internal |
| control-node0.xiegh.com | consoleauth | internal |
| control-node0.xiegh.com | scheduler   | internal |
| control-node0.xiegh.com | cert        | internal |
| compute-node1.xiegh.com | compute     | nova     |
+-------------------------+-------------+----------+
计算节点上nova安装成功并注册成功
镜像处于活动的状态
[root@control-node0 ~]#  nova image-list
+--------------------------------------+--------+--------+--------+
| ID                                   | Name   | Status | Server |
+--------------------------------------+--------+--------+--------+
| 07245ea1-5f76-453d-a320-f1b08433a10a | cirros | ACTIVE |        |
+--------------------------------------+--------+--------+--------+
验证nova与keystone的连接,如下说明成功
[root@control-node0 ~]# nova endpoints
WARNING: keystone has no endpoint in ! Available endpoints for this service:
+-----------+----------------------------------+
| keystone  | Value                            |
+-----------+----------------------------------+
| id        | 1a8eb7b97ff64c56886942a38054b9bb |
| interface | public                           |
| region    | RegionOne                        |
| region_id | RegionOne                        |
| url       | http://192.168.1.17:5000/v2.0       |
+-----------+----------------------------------+
+-----------+----------------------------------+
| keystone  | Value                            |
+-----------+----------------------------------+
| id        | 34c8185306c340a0bb4efbfc9da21003 |
| interface | admin                            |
| region    | RegionOne                        |
| region_id | RegionOne                        |
| url       | http://192.168.1.17:35357/v2.0      |
+-----------+----------------------------------+
+-----------+----------------------------------+
| keystone  | Value                            |
+-----------+----------------------------------+
| id        | 4caf182c26dd457ba86d9974dfb00c1b |
| interface | internal                         |
| region    | RegionOne                        |
| region_id | RegionOne                        |
| url       | http://192.168.1.17:5000/v2.0       |
+-----------+----------------------------------+
WARNING: glance has no endpoint in ! Available endpoints for this service:
+-----------+----------------------------------+
| glance    | Value                            |
+-----------+----------------------------------+
| id        | 4df72061901c40efa3905e95674fc5bc |
| interface | internal                         |
| region    | RegionOne                        |
| region_id | RegionOne                        |
| url       | http://192.168.1.17:9292            |
+-----------+----------------------------------+
+-----------+----------------------------------+
| glance    | Value                            |
+-----------+----------------------------------+
| id        | a181ddd3ee8b4d72be1a0fda87b542ef |
| interface | public                           |
| region    | RegionOne                        |
| region_id | RegionOne                        |
| url       | http://192.168.1.17:9292            |
+-----------+----------------------------------+
+-----------+----------------------------------+
| glance    | Value                            |
+-----------+----------------------------------+
| id        | f755b7c22ab04ea3857840086b7c7754 |
| interface | admin                            |
| region    | RegionOne                        |
| region_id | RegionOne                        |
| url       | http://192.168.1.17:9292            |
+-----------+----------------------------------+
WARNING: nova has no endpoint in ! Available endpoints for this service:
+-----------+-----------------------------------------------------------+
| nova      | Value                                                     |
+-----------+-----------------------------------------------------------+
| id        | 71daf94628384f1e8315060f86542696                          |
| interface | admin                                                     |
| region    | RegionOne                                                 |
| region_id | RegionOne                                                 |
| url       | http://192.168.1.17:8774/v2/b5a578cfdb4848dba2b91dd38d1e2b93 |
+-----------+-----------------------------------------------------------+
+-----------+-----------------------------------------------------------+
| nova      | Value                                                     |
+-----------+-----------------------------------------------------------+
| id        | b42b8696b4e84d0581228f8fef746ce2                          |
| interface | public                                                    |
| region    | RegionOne                                                 |
| region_id | RegionOne                                                 |
| url       | http://192.168.1.17:8774/v2/b5a578cfdb4848dba2b91dd38d1e2b93 |
+-----------+-----------------------------------------------------------+
+-----------+-----------------------------------------------------------+
| nova      | Value                                                     |
+-----------+-----------------------------------------------------------+
| id        | b54df18a4c23471399858df476a98d5f                          |
| interface | internal                                                  |
| region    | RegionOne                                                 |
| region_id | RegionOne                                                 |
| url       | http://192.168.1.17:8774/v2/b5a578cfdb4848dba2b91dd38d1e2b93 |
+-----------+-----------------------------------------------------------+
Neutron部署
注册网络服务:
source admin-openrc.sh 
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://192.168.1.17:9696
openstack endpoint create --region RegionOne network internal http://192.168.1.17:9696
openstack endpoint create --region RegionOne network admin http://192.168.1.17:9696
[root@control-node0 ~]# openstack service create --name neutron --description "OpenStack Networking" network
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Networking             |
| enabled     | True                             |
| id          | eb5f03d85c774f48940654811a22b581 |
| name        | neutron                          |
| type        | network                          |
+-------------+----------------------------------+
[root@control-node0 ~]# openstack endpoint create --region RegionOne network public http://192.168.1.17:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | f782d738018a4dc5b80931f67f31d974 |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | eb5f03d85c774f48940654811a22b581 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://192.168.1.17:9696            |
+--------------+----------------------------------+
[root@control-node0 ~]# openstack endpoint create --region RegionOne network internal http://192.168.1.17:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 21565236fb1b4bc8b0c37c040369d7d4 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | eb5f03d85c774f48940654811a22b581 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://192.168.1.17:9696            |
+--------------+----------------------------------+
[root@control-node0 ~]# openstack endpoint create --region RegionOne network admin http://192.168.1.17:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | f2c83846242d4443a7cd3f205cf3bb56 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | eb5f03d85c774f48940654811a22b581 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://192.168.1.17:9696            |
+--------------+----------------------------------+
[root@control-node0 ~]# grep -n '^[a-z]' /etc/neutron/neutron.conf 
20:state_path = /var/lib/neutron
60:core_plugin = ml2
77:service_plugins = router
92:auth_strategy = keystone
360:notify_nova_on_port_status_changes = True
364:notify_nova_on_port_data_changes = True
367:nova_url = http://192.168.1.17:8774/v2
573:rpc_backend=rabbit
717:auth_uri = http://192.168.1.17:5000
718:auth_url = http://192.168.1.17:35357
719:auth_plugin = password
720:project_domain_id = default
721:user_domain_id = default
722:project_name = service
723:username = neutron
724:password = neutron
737:connection = mysql://neutron:neutron@192.168.1.17:3306/neutron
780:auth_url = http://192.168.1.17:35357
781:auth_plugin = password
782:project_domain_id = default
783:user_domain_id = default
784:region_name = RegionOne
785:project_name = service
786:username = nova
787:password = nova
818:lock_path = $state_path/lock
998:rabbit_host = 192.168.1.17
1002:rabbit_port = 5672
1014:rabbit_userid = openstack
1018:rabbit_password = openstack
[root@control-node0 ~]# grep -n '^[a-z]' /etc/neutron/plugins/ml2/ml2_conf.ini
5:type_drivers = flat,vlan,gre,vxlan,geneve
12:tenant_network_types = vlan,gre,vxlan,geneve
18:mechanism_drivers = openvswitch,linuxbridge
27:extension_drivers = port_security
67:flat_networks = physnet1
120:enable_ipset = True
[root@control-node0 ~]# grep -n '^[a-z]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
9:physical_interface_mappings = physnet1:eth0
16:enable_vxlan = false
51:prevent_arp_spoofing = True
57:firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
61:enable_security_group = True
[root@control-node0 ~]# grep -n '^[a-z]' /etc/neutron/dhcp_agent.ini
27:interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
31:dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
52:enable_isolated_metadata = true
[root@control-node0 ~]# grep -n '^[a-z]' /etc/neutron/metadata_agent.ini
4:auth_uri = http://192.168.1.17:5000
5:auth_url = http://192.168.1.17:35357
6:auth_region = RegionOne
7:auth_plugin = password
8:project_domain_id = default
9:user_domain_id = default
10:project_name = service
11:username = neutron
12:password = neutron
29:nova_metadata_ip = 192.168.1.17
52:metadata_proxy_shared_secret = neutron
[root@control-node0 ~]# grep -n '^[a-z]' /etc/nova/nova.conf 
61:rpc_backend=rabbit
124:my_ip=192.168.1.17
268:enabled_apis=osapi_compute,metadata
425:auth_strategy=keystone
1053:network_api_class=nova.network.neutronv2.api.API
1171:linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
1331:security_group_api=neutron
1760:firewall_driver = nova.virt.firewall.NoopFirewallDriver
1828:vncserver_listen=$my_ip
1832:vncserver_proxyclient_address=$my_ip
2213:connection=mysql://nova:nova@192.168.1.17/nova
2334:host=$my_ip
2542:auth_uri = http://192.168.1.17:5000
2543:auth_url = http://192.168.1.17:35357
2544:auth_plugin = password
2545:project_domain_id = default
2546:user_domain_id = default
2547:project_name = service
2548:username = nova
2549:password = nova
3033:url = http://192.168.1.17:9696
3034:auth_url = http://192.168.1.17:35357
3035:auth_plugin = password
3036:project_domain_id = default
3037:user_domain_id = default
3038:region_name = RegionOne
3039:project_name = service
3040:username = neutron
3041:password = neutron
3049:service_metadata_proxy=true
3053:metadata_proxy_shared_secret=neutron
3804:lock_path=/var/lib/nova/tmp
3967:rabbit_host=192.168.1.17
3971:rabbit_port=5672
3983:rabbit_userid=openstack
3987:rabbit_password=openstack
[root@control-node0 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
[root@control-node0 ~]# openstack user create --domain default --password=neutron neutron
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | default                          |
| enabled   | True                             |
| id        | 85c411a092354b29b58c7505a8905824 |
| name      | neutron                          |
+-----------+----------------------------------+
[root@control-node0 ~]# openstack role add --project service --user neutron admin

更新数据库
[root@control-node0 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

重新驱动下服务:
[root@control-node0 ~]# systemctl restart openstack-nova-api
开机自动加载neutron及启动neutron服务
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
执行结果:
[root@control-node0 ~]# systemctl restart openstack-nova-api
[root@control-node0 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
ln -s '/usr/lib/systemd/system/neutron-server.service' '/etc/systemd/system/multi-user.target.wants/neutron-server.service'
ln -s '/usr/lib/systemd/system/neutron-linuxbridge-agent.service' '/etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service'
ln -s '/usr/lib/systemd/system/neutron-dhcp-agent.service' '/etc/systemd/system/multi-user.target.wants/neutron-dhcp-agent.service'
ln -s '/usr/lib/systemd/system/neutron-metadata-agent.service' '/etc/systemd/system/multi-user.target.wants/neutron-metadata-agent.service'
[root@control-node0 ~]# systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
查看网卡的配置
[root@control-node0 ~]# source admin-openrc.sh 
[root@control-node0 ~]# neutron agent-list 
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host                    | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
| 4de08ae7-5699-47ea-986b-7c855d7eb7bd | Linux bridge agent | control-node0.xiegh.com | :-)   | True           | neutron-linuxbridge-agent |
| adf5abfc-2a74-4baa-b4cd-da7f7f05a378 | Metadata agent     | control-node0.xiegh.com | :-)   | True           | neutron-metadata-agent    |
| c1562203-c8ff-4189-a59b-bcf480ca70c1 | DHCP agent         | control-node0.xiegh.com | :-)   | True           | neutron-dhcp-agent        |
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
将控制节点的配置文件neutron.conf 拷贝到计算节点的目录/etc/neutron/
[root@control-node0 ~]# scp -r /etc/neutron/neutron.conf 10.0.0.81:/etc/neutron/
[root@control-node0 ~]# scp -r /etc/neutron/plugins/ml2/linuxbridge_agent.ini 10.0.0.81:/etc/neutron/plugins/ml2/
[root@control-node0 ~]# scp -r /etc/neutron/plugins/ml2/ml2_conf.ini 10.0.0.81:/etc/neutron/plugins/ml2/
在已经拷贝了,这里就不拷贝了nova.conf 
[root@compute-node1 ~]# grep -n '^[a-z]'  /etc/neutron/neutron.conf
20:state_path = /var/lib/neutron
60:core_plugin = ml2
77:service_plugins = router
92:auth_strategy = keystone
360:notify_nova_on_port_status_changes = True
364:notify_nova_on_port_data_changes = True
367:nova_url = http://192.168.1.17:8774/v2
573:rpc_backend=rabbit
717:auth_uri = http://192.168.1.17:5000
718:auth_url = http://192.168.1.17:35357
719:auth_plugin = password
720:project_domain_id = default
721:user_domain_id = default
722:project_name = service
723:username = neutron
724:password = neutron
737:connection = mysql://neutron:neutron@192.168.1.17:3306/neutron
780:auth_url = http://192.168.1.17:35357
781:auth_plugin = password
782:project_domain_id = default
783:user_domain_id = default
784:region_name = RegionOne
785:project_name = service
786:username = nova
787:password = nova
818:lock_path = $state_path/lock
998:rabbit_host = 192.168.1.17
1002:rabbit_port = 5672
1014:rabbit_userid = openstack
1018:rabbit_password = openstack

[root@compute-node1 ~]# grep -n '^[a-z]'  /etc/neutron/plugins/ml2/linuxbridge_agent.ini
9:physical_interface_mappings = physnet1:eth0
16:enable_vxlan = false
51:prevent_arp_spoofing = True
57:firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
61:enable_security_group = True
[root@compute-node1 ~]# grep -n '^[a-z]'  /etc/neutron/plugins/ml2/ml2_conf.ini
5:type_drivers = flat,vlan,gre,vxlan,geneve
12:tenant_network_types = vlan,gre,vxlan,geneve
18:mechanism_drivers = openvswitch,linuxbridge
27:extension_drivers = port_security
67:flat_networks = physnet1
120:enable_ipset = True

[root@compute-node1 ~]# grep -n '^[a-z]'  /etc/nova/nova.conf 
61:rpc_backend=rabbit
124:my_ip=10.0.0.81
268:enabled_apis=osapi_compute,metadata
425:auth_strategy=keystone
1053:network_api_class=nova.network.neutronv2.api.API
1171:linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
1331:security_group_api=neutron
1760:firewall_driver = nova.virt.firewall.NoopFirewallDriver
1820:novncproxy_base_url=http://192.168.1.17:6080/vnc_auto.html
1828:vncserver_listen=0.0.0.0
1832:vncserver_proxyclient_address=10.0.0.81
1835:vnc_enabled=true
1838:vnc_keymap=en-us
2213:connection=mysql://nova:nova@192.168.1.17/nova
2334:host=192.168.1.17
2542:auth_uri = http://192.168.1.17:5000
2543:auth_url = http://192.168.1.17:35357
2544:auth_plugin = password
2545:project_domain_id = default
2546:user_domain_id = default
2547:project_name = service
2548:username = nova
2549:password = nova
2727:virt_type=kvm
3033:url = http://192.168.1.17:9696
3034:auth_url = http://192.168.1.17:35357
3035:auth_plugin = password
3036:project_domain_id = default
3037:user_domain_id = default
3038:region_name = RegionOne
3039:project_name = service
3040:username = neutron
3041:password = neutron
3804:lock_path=/var/lib/nova/tmp
3967:rabbit_host=192.168.1.17
3971:rabbit_port=5672
3983:rabbit_userid=openstack
3987:rabbit_password=openstack

[root@compute-node1 ~]# systemctl restart openstack-nova-compute
[root@compute-node1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
[root@compute-node1 ~]# systemctl enable neutron-linuxbridge-agent.service
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service to /usr/lib/systemd/system/neutron-linuxbridge-agent.service.
[root@compute-node1 ~]# systemctl restart neutron-linuxbridge-agent.service
故障:
在控制不能发现计算节点neutron-linuxbridge-agent
重启计算计算节点恢复正常
[root@control-node0 ~]#  neutron agent-list
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host                    | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
| 4de08ae7-5699-47ea-986b-7c855d7eb7bd | Linux bridge agent | control-node0.xiegh.com | :-)   | True           | neutron-linuxbridge-agent |
| adf5abfc-2a74-4baa-b4cd-da7f7f05a378 | Metadata agent     | control-node0.xiegh.com | :-)   | True           | neutron-metadata-agent    |
| c1562203-c8ff-4189-a59b-bcf480ca70c1 | DHCP agent         | control-node0.xiegh.com | :-)   | True           | neutron-dhcp-agent        |
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
在控制节点查看:
[root@control-node0 ~]# neutron agent-list
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host                    | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
| 4de08ae7-5699-47ea-986b-7c855d7eb7bd | Linux bridge agent | control-node0.xiegh.com | :-)   | True           | neutron-linuxbridge-agent |
| a7b2c76e-2c9e-42a3-89ac-725716a0c370 | Linux bridge agent | compute-node1.xiegh.com | :-)   | True           | neutron-linuxbridge-agent |
| adf5abfc-2a74-4baa-b4cd-da7f7f05a378 | Metadata agent     | control-node0.xiegh.com | :-)   | True           | neutron-metadata-agent    |
| c1562203-c8ff-4189-a59b-bcf480ca70c1 | DHCP agent         | control-node0.xiegh.com | :-)   | True           | neutron-dhcp-agent        |
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
代表计算节点的Linux bridge agent已成功连接到控制节点。
创建一个网络:
neutron net-create flat --shared --provider:physical_network physnet1 --provider:network_type flat
[root@control-node0 ~]# neutron net-create flat --shared --provider:physical_network physnet1 --provider:network_type flat
Created a new network:
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | True                                 |
| id                        | 516b5a4d-7fa5-43ae-8328-965c5e0e21d7 |
| mtu                       | 0                                    |
| name                      | flat                                 |
| port_security_enabled     | True                                 |
| provider:network_type     | flat                                 |
| provider:physical_network | physnet1                             |
| provider:segmentation_id  |                                      |
| router:external           | False                                |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   |                                      |
| tenant_id                 | b5a578cfdb4848dba2b91dd38d1e2b93     |
+---------------------------+--------------------------------------+
创建一个子网
neutron subnet-create flat 10.0.0.0/24 --name flat-subnet --allocation-pool start=10.0.0.100,end=10.0.0.200 --dns-nameserver 10.0.0.2 --gateway 10.0.0.2
[root@control-node0 ~]# neutron subnet-create flat 10.0.0.0/24 --name flat-subnet --allocation-pool start=10.0.0.100,end=10.0.0.200 --dns-nameserver 10.0.0.2 --gateway 10.0.0.2
Created a new subnet:
+-------------------+----------------------------------------------+
| Field             | Value                                        |
+-------------------+----------------------------------------------+
| allocation_pools  | {"start": "10.0.0.100", "end": "10.0.0.200"} |
| cidr              | 10.0.0.0/24                                  |
| dns_nameservers   | 10.0.0.2                                     |
| enable_dhcp       | True                                         |
| gateway_ip        | 10.0.0.2                                     |
| host_routes       |                                              |
| id                | 64ba9f36-3e3e-4988-a863-876759ad43c3         |
| ip_version        | 4                                            |
| ipv6_address_mode |                                              |
| ipv6_ra_mode      |                                              |
| name              | flat-subnet                                  |
| network_id        | 516b5a4d-7fa5-43ae-8328-965c5e0e21d7         |
| subnetpool_id     |                                              |
| tenant_id         | b5a578cfdb4848dba2b91dd38d1e2b93             |
+-------------------+----------------------------------------------+
查看网络和子网
[root@control-node0 ~]# neutron subnet-list 
+--------------------------------------+-------------+-------------+----------------------------------------------+
| id                                   | name        | cidr        | allocation_pools                             |
+--------------------------------------+-------------+-------------+----------------------------------------------+
| 64ba9f36-3e3e-4988-a863-876759ad43c3 | flat-subnet | 10.0.0.0/24 | {"start": "10.0.0.100", "end": "10.0.0.200"} |
+--------------------------------------+-------------+-------------+----------------------------------------------+
[root@control-node0 ~]#  source demo-openrc.sh 
[root@control-node0 ~]# ssh-keygen -q -N ""
Enter file in which to save the key (/root/.ssh/id_rsa): 
[root@control-node0 ~]# ls .ssh/
id_rsa  id_rsa.pub  known_hosts
[root@control-node0 ~]# nova keypair-add --pub-key .ssh/id_rsa.pub mykey
[root@control-node0 ~]# nova keypair-list
+-------+-------------------------------------------------+
| Name  | Fingerprint                                     |
+-------+-------------------------------------------------+
| mykey | ce:ad:3c:51:2a:db:dc:4c:d1:a5:22:e6:20:53:cf:65 |
+-------+-------------------------------------------------+
[root@control-node0 ~]# nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
+-------------+-----------+---------+-----------+--------------+
| IP Protocol | From Port | To Port | IP Range  | Source Group |
+-------------+-----------+---------+-----------+--------------+
| icmp        | -1        | -1      | 0.0.0.0/0 |              |
+-------------+-----------+---------+-----------+--------------+
[root@control-node0 ~]# nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
+-------------+-----------+---------+-----------+--------------+
| IP Protocol | From Port | To Port | IP Range  | Source Group |
+-------------+-----------+---------+-----------+--------------+
| tcp         | 22        | 22      | 0.0.0.0/0 |              |
+-------------+-----------+---------+-----------+--------------+
[root@control-node0 ~]# nova flavor-list
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| ID | Name      | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| 1  | m1.tiny   | 512       | 1    | 0         |      | 1     | 1.0         | True      |
| 2  | m1.small  | 2048      | 20   | 0         |      | 1     | 1.0         | True      |
| 3  | m1.medium | 4096      | 40   | 0         |      | 2     | 1.0         | True      |
| 4  | m1.large  | 8192      | 80   | 0         |      | 4     | 1.0         | True      |
| 5  | m1.xlarge | 16384     | 160  | 0         |      | 8     | 1.0         | True      |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
[root@control-node0 ~]# nova image-list
+--------------------------------------+--------+--------+--------+
| ID                                   | Name   | Status | Server |
+--------------------------------------+--------+--------+--------+
| 07245ea1-5f76-453d-a320-f1b08433a10a | cirros | ACTIVE |        |
+--------------------------------------+--------+--------+--------+
[root@control-node0 ~]# neutron net-list
+--------------------------------------+------+--------------------------------------------------+
| id                                   | name | subnets                                          |
+--------------------------------------+------+--------------------------------------------------+
| 516b5a4d-7fa5-43ae-8328-965c5e0e21d7 | flat | 64ba9f36-3e3e-4988-a863-876759ad43c3 10.0.0.0/24 |
+--------------------------------------+------+--------------------------------------------------+
[root@control-node0 ~]# nova secgroup-list
+--------------------------------------+---------+------------------------+
| Id                                   | Name    | Description            |
+--------------------------------------+---------+------------------------+
| ba83d14c-2516-427b-8e88-89a49270b8d7 | default | Default security group |
+--------------------------------------+---------+------------------------+
nova boot --flavor m1.tiny --image cirros --nic net-id=516b5a4d-7fa5-43ae-8328-965c5e0e21d7 --security-group default --key-name mykey hehe-instance

私有云 openstack部署