返回顶部
首页 > 资讯 > 数据库 >私有云 openstack部署
  • 824
分享到

私有云 openstack部署

2024-04-02 19:04:59 824人浏览 独家记忆
摘要

                 &

                               
控制节点 计算节点 两台机器
环境准备
Centos7.1
控制节点
外网卡linux-node0.openstack 192.168.31.151
内网卡linux-node0.openstack 192.168.1.17
计算节点
外网卡linux-node1.openstack 192.168.31.219
内网卡linux-node1.openstack 192.168.1.8
关闭防火墙 firewalld
关闭selinux
/etc/hosts  #主机名一开始设置好,后面就不能更改了,否则就会出问题!这里设置好ip与主机名的对应关系
192.168.1.17  linux-node0.openstack      
192.168.1.8   linux-node1.openstack  

#Base 安装源文件
yum install -y http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-8.noarch.rpm
yum install -y centos-release-openstack-liberty
yum install -y python-openstackclient
##Mysql 
yum install -y mariadb mariadb-server MySQL-python
##RabbitMQ
yum install -y rabbitmq-server
##Keystone
yum install -y openstack-keystone httpd mod_wsgi memcached python-memcached
##Glance
yum install -y openstack-glance python-glance python-glanceclient
##Nova
yum install -y openstack-nova-api openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler python-novaclient
##Neutron linux-node1.example.com
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge python-neutronclient ebtables ipset
##Dashboard
yum install -y openstack-dashboard
##Cinder
yum install -y openstack-cinder python-cinderclient
*************************************************************************************

##Base
yum install -y http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-8.noarch.rpm
yum install centos-release-openstack-liberty
yum install python-openstackclient
##Nova linux-node1.openstack
yum install -y openstack-nova-compute sysfsutils
##Neutron linux-node1.openstack
yum install -y openstack-neutron openstack-neutron-linuxbridge ebtables ipset
##Cinder
yum install -y openstack-cinder python-cinderclient targetcli python-oslo-policy
*************************************************************************************

设置时间同步、 关闭 selinux 和 iptables
在 linux-node0 上配置( 只有 centos7 能用, 6 还用 ntp)
[root@linux-node0 ~]# yum install -y chrony
vim /etc/chrony.conf
allow 192.168/16 #允许哪些服务器和自己同步时间
[root@linux-node0 ~]# 
systemctl enable chronyd.service    #开机启动
systemctl start chronyd.service
timedatectl set-timezone Asia/Shanghai     #设置时区
timedatectl status
在 linux-node1 上配置
[root@linux-node1 ~]# 
yum install -y chrony
vim /etc/chrony.conf
server 192.168.1.17 iburst #只留一行
[root@linux-node1 ~]# 
systemctl enable chronyd.service
systemctl start chronyd.service
timedatectl set-timezone Asia/Shanghai
chronyc sources

[root@linux-node0 ~]# 
cp /usr/share/mysql/my-medium.cnf /etc/my.cnf  或 /usr/share/mariadb/my-medium.cnf
[mysqld]
default-storage-engine = innodb
innodb_file_per_table
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
[root@linux-node0 ~]# 
systemctl enable mariadb.service                                             
mysql_install_db --datadir="/var/lib/mysql" --user="mysql"  #初始化数据库
systemctl start mariadb.service
mysql_secure_installation                                                     #设置密码及初始化
密码 123456,一路 y 回车
CREATE DATABASE keystone; 
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'keystone';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone';

CREATE DATABASE glance;   
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';

CREATE DATABASE nova;    
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';

CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';

CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder';
flush privileges; 更新数据库


[root@linux-node0 ~]# systemctl enable rabbitmq-server.service
[root@linux-node0 ~]# systemctl start rabbitmq-server.service
创建openstack的用户名和密码
[root@linux-node0 ~]# rabbitmqctl add_user openstack openstack
Creating user "openstack" ...
...done.
用户授权
[root@linux-node0 ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
Setting permissions for user "openstack" in vhost "/" ...
...done.
列出rabbitmq的插件
[root@linux-node0 ~]# rabbitmq-plugins list
[ ] amqp_client                       3.3.5
[ ] cowboy                            0.5.0-rmq3.3.5-git4b93c2d
[ ] eldap                             3.3.5-gite309de4
[ ] mochiweb                          2.7.0-rmq3.3.5-git680dba8
[ ] rabbitmq_amqp1_0                  3.3.5
[ ] rabbitmq_auth_backend_ldap        3.3.5
[ ] rabbitmq_auth_mechanism_ssl       3.3.5
[ ] rabbitmq_consistent_hash_exchange 3.3.5
[ ] rabbitmq_federation               3.3.5
[ ] rabbitmq_federation_management    3.3.5
[ ] rabbitmq_management               3.3.5
[ ] rabbitmq_management_agent         3.3.5
[ ] rabbitmq_management_visualiser    3.3.5
[ ] rabbitmq_mqtt                     3.3.5
[ ] rabbitmq_shovel                   3.3.5
[ ] rabbitmq_shovel_management        3.3.5
[ ] rabbitmq_stomp                    3.3.5
[ ] rabbitmq_test                     3.3.5
[ ] rabbitmq_tracing                  3.3.5
[ ] rabbitmq_web_dispatch             3.3.5
[ ] rabbitmq_web_stomp                3.3.5
[ ] rabbitmq_web_stomp_examples       3.3.5
[ ] sockjs                            0.3.4-rmq3.3.5-git3132eb9
[ ] webmachine                        1.10.3-rmq3.3.5-gite9359c7
rabbitmq管理插件启动
[root@linux-node0 ~]# rabbitmq-plugins enable rabbitmq_management 
重新启动rabbitmq
[root@linux-node0 ~]# systemctl restart rabbitmq-server.service
再次查看监听的端口:web管理端口:15672
lsof -i:15672 查看进程
[root@linux-node0 ~]# netstat -lntup
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:25672           0.0.0.0:*               LISTEN      38649/beam          
tcp        0      0 0.0.0.0:3306            0.0.0.0:*               LISTEN      38154/mysqld        

打开http://192.168.31.151:15672  用户名 guest      密码 guest 
登录进去之后:
Admin------->复制administrator------->点击openstack------>Update this user-------->
Tags:粘贴administrator--------->密码都设置为openstack-------->Logout
然后在登陆:用户名 openstack  密码  openstack

[root@linux-node0 ~]# openssl rand -hex 10
 8097f01ca96d056655cf 产生的随机数
[root@linux-node0 ~]# grep -n '^[a-z]'  /etc/keystone/keystone.conf
12:admin_token =  8097f01ca96d056655cf
107:verbose = true
495:connection = mysql://keystone:keystone@192.168.1.17/keystone
1313:servers =  192.168.1.17:11211
1349:driver = sql
1911:provider = uuid
1916:driver = memcache
同步数据库:注意权限,所以要用su -s 切换到keystone用户下执行:
[root@linux-node0 ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
No handlers could be found for logger "oslo_config.cfg"
验证数据是否创建成功
[root@linux-node0 ~]# mysql -ukeystone -pkeystone
MariaDB [(none)]> use keystone
Database changed
MariaDB [keystone]> show tables;

[root@linux-node0 ~]# systemctl enable memcached
[root@linux-node0 ~]# systemctl start memcached.service
必须要配置httpd的ServerName,否则keystone服务不能起来
[root@linux-node0 ~]# vi /etc/httpd/conf/httpd.conf
ServerName 192.168.1.17:80
[root@linux-node0 ~]# grep -n '^ServerName' /etc/httpd/conf/httpd.conf      
95:ServerName 192.168.1.17:80
新建keystone配置文件,并用apache来代理它:5000  正常的api来访问  35357  管理访问的端口
[root@linux-node0 ~]# vim /etc/httpd/conf.d/wsgi-keystone.conf
Listen 5000
Listen 35357
<VirtualHost *:5000>
    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-public
    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    <IfVersion >= 2.4>
      ErrorLogFORMat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined
    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
<VirtualHost *:35357>
    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-admin
    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    <IfVersion >= 2.4>
      ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined
    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
启动memcache与httpd服务
[root@linux-node0 ~]# systemctl enable httpd
[root@linux-node0 ~]# systemctl start httpd
查看端口
[root@linux-node0 ~]# netstat -lntup|grep httpd
tcp6       0      0 :::5000                 :::*                    LISTEN      39324/httpd         
tcp6       0      0 :::80                   :::*                    LISTEN      39324/httpd         
tcp6       0      0 :::35357                :::*                    LISTEN      39324/httpd       
创建验证用户及地址版本信息
[root@linux-node0 ~]# grep -n '^admin_token' /etc/keystone/keystone.conf
12:admin_token = 8097f01ca96d056655cf
[root@linux-node0 ~]# export OS_TOKEN=8097f01ca96d056655cf
[root@linux-node0 ~]# export OS_URL=http://192.168.1.17:35357/v3
[root@linux-node0 ~]# export OS_IDENTITY_API_VERSION=3
[root@linux-node0 ~]# env

创建 admin 项目---创建 admin 用户(密码 admin,生产不要这么玩) ---创建 admin 角色---把 admin 用户加入到 admin 项目赋予 admin 的角色(三个 admin 的位置:项目,用户,角色)
创建租户用户
[root@linux-node0 ~]# openstack project create --domain default --description "Admin Project" admin
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Admin Project                    |
| domain_id   | default                          |
| enabled     | True                             |
| id          | b5a578cfdb4848dba2b91dd38d1e2b93 |
| is_domain   | False                            |
| name        | admin                            |
| parent_id   | None                             |
+-------------+----------------------------------+
创建admin的用户
[root@linux-node0 ~]# openstack user create --domain default --password-prompt admin
User Password:admin
Repeat User Password:admin
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | default                          |
| enabled   | True                             |
| id        | ad4f6c3d88a047d6802a05735a03ba8f |
| name      | admin                            |
+-----------+----------------------------------+
创建admin的角色
[root@linux-node0 ~]# openstack role create admin
+-------+----------------------------------+
| Field | Value                            |
+-------+----------------------------------+
| id    | 0b546d54ed7f467fa90f18bb899452d3 |
| name  | admin                            |
+-------+----------------------------------+
把admin用户加入到admin项目,并赋予admin的角色
[root@linux-node0 ~]# openstack role add --project admin --user admin admin
创建普通用户密码及角色
[root@linux-node0 ~]# openstack project create --domain default --description "Demo Project" demo
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Demo Project                     |
| domain_id   | default                          |
| enabled     | True                             |
| id          | 5f4aaeb328f049ddbfe2717ded103c67 |
| is_domain   | False                            |
| name        | demo                             |
| parent_id   | None                             |
+-------------+----------------------------------+
[root@linux-node0 ~]# openstack user create --domain default --password=demo demo
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | default                          |
| enabled   | True                             |
| id        | 46dc3686bc0a4ea6b8d09505603ccecc |
| name      | demo                             |
+-----------+----------------------------------+
[root@linux-node0 ~]# openstack role create user
+-------+----------------------------------+
| Field | Value                            |
+-------+----------------------------------+
| id    | 314a22500bf042ba9a970701e2c39998 |
| name  | user                             |
+-------+----------------------------------+
[root@linux-node0 ~]# openstack role add --project demo --user demo user
创建一个Service的项目 用来管理其他服务用
[root@linux-node0 ~]# 
openstack project create --domain default --description "Service Project" service
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Service Project                  |
| domain_id   | default                          |
| enabled     | True                             |
| id          | de068df7bbad42379c0c6050fa306fbb |
| is_domain   | False                            |
| name        | service                          |
| parent_id   | None                             |
+-------------+----------------------------------+
查看创建的用户及角色
[root@linux-node0 ~]# openstack user list
+----------------------------------+-------+
| ID                               | Name  |
+----------------------------------+-------+
| 46dc3686bc0a4ea6b8d09505603ccecc | demo  |
| ad4f6c3d88a047d6802a05735a03ba8f | admin |
+----------------------------------+-------+
[root@linux-node0 ~]#  openstack role list
+----------------------------------+-------+
| ID                               | Name  |
+----------------------------------+-------+
| 0b546d54ed7f467fa90f18bb899452d3 | admin |
| 314a22500bf042ba9a970701e2c39998 | user  |
+----------------------------------+-------+
[root@linux-node0 ~]# openstack project list
+----------------------------------+---------+
| ID                               | Name    |
+----------------------------------+---------+
| 5f4aaeb328f049ddbfe2717ded103c67 | demo    |
| b5a578cfdb4848dba2b91dd38d1e2b93 | admin   |
| de068df7bbad42379c0c6050fa306fbb | service |
+----------------------------------+---------+
keystone本身也需要注册
[root@linux-node0 ~]# 
openstack service create --name keystone --description "OpenStack Identity" identity
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Identity               |
| enabled     | True                             |
| id          | d632e3036b974943978631b9cabcafe0 |
| name        | keystone                         |
| type        | identity                         |
+-------------+----------------------------------+
公共的api接口
[root@linux-node0 ~]# openstack endpoint create --region RegionOne identity public http://192.168.1.17:5000/v2.0
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 1a8eb7b97ff64c56886942a38054b9bb |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | d632e3036b974943978631b9cabcafe0 |
| service_name | keystone                         |
| service_type | identity                         |
| url          | http://192.168.1.17:5000/v2.0       |
+--------------+----------------------------------+
私有的api接口
[root@linux-node0 ~]# 
openstack endpoint create --region RegionOne identity internal http://192.168.1.17:5000/v2.0
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 4caf182c26dd457ba86d9974dfb00c1b |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | d632e3036b974943978631b9cabcafe0 |
| service_name | keystone                         |
| service_type | identity                         |
| url          | http://192.168.1.17:5000/v2.0       |
+--------------+----------------------------------+
管理的api接口
[root@linux-node0 ~]# 
openstack endpoint create --region RegionOne identity admin http://192.168.1.17:35357/v2.0
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 34c8185306c340a0bb4efbfc9da21003 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | d632e3036b974943978631b9cabcafe0 |
| service_name | keystone                         |
| service_type | identity                         |
| url          | http://192.168.1.17:35357/v2.0      |
+--------------+----------------------------------+
查看api接口
[root@linux-node0 ~]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+-
| ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                         |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-
| 1a8eb7b97ff64c56886942a38054b9bb | RegionOne | keystone     | identity     | True    | public    | http://192.168.1.17:5000/v2.0  |
| 34c8185306c340a0bb4efbfc9da21003 | RegionOne | keystone     | identity     | True    | admin     | http://192.168.1.17:35357/v2.0 |
| 4caf182c26dd457ba86d9974dfb00c1b | RegionOne | keystone     | identity     | True    | internal  | http://192.168.1.17:5000/v2.0  |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-
删除  openstack endpoint delete ID号 
使用用户名密码的方式登录:必须要先取消环境变量
[root@linux-node0 ~]# unset OS_TOKEN
[root@linux-node0 ~]# unset OS_URL
[root@linux-node0 ~]# openstack --os-auth-url http://192.168.1.17:35357/v3 --os-project-domain-id default --os-user-domain-id default --os-project-name admin --os-username admin --os-auth-type password token issue
Password: 
+------------+----------------------------------+
| Field      | Value                            |
+------------+----------------------------------+
| expires    | 2016-05-27T05:25:30.193235Z      |
| id         | 4e8c0c1e0f20481d959c977db7f689b6 |
| project_id | b5a578cfdb4848dba2b91dd38d1e2b93 |
| user_id    | ad4f6c3d88a047d6802a05735a03ba8f |
+------------+----------------------------------+
密码 admin

便快捷的使用keystone,我们需要设置两个环境变量:
[root@linux-node0 ~]# cat admin-openrc.sh 
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=admin
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://192.168.1.17:35357/v3
export OS_IDENTITY_API_VERSION=3
[root@linux-node0 ~]# cat demo-openrc.sh 
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=demo
export OS_TENANT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=demo
export OS_AUTH_URL=http://192.168.1.17:5000/v3
export OS_IDENTITY_API_VERSION=3
添加执行权限
[root@linux-node0 ~]# chmod +x admin-openrc.sh demo-openrc.sh 
测试获取TOKEN
[root@linux-node0 ~]# source admin-openrc.sh 
[root@linux-node0 ~]# openstack token issue
+------------+----------------------------------+
| Field      | Value                            |
+------------+----------------------------------+
| expires    | 2016-05-27T05:30:03.600977Z      |
| id         | 409443b07f5948f2a437443090927621 |
| project_id | b5a578cfdb4848dba2b91dd38d1e2b93 |
| user_id    | ad4f6c3d88a047d6802a05735a03ba8f |
+------------+----------------------------------+

修改配置文件添加数据库连接glance-api.conf与glance-registry.conf
[root@linux-node0 ~]# vim /etc/glance/glance-api.conf 
[root@linux-node0 ~]# vim /etc/glance/glance-registry.conf 
[root@linux-node0 ~]# grep -n '^connection' /etc/glance/glance-api.conf
538:connection=mysql://glance:glance@192.168.1.17/glance
[root@linux-node0 ~]# grep -n '^connection' /etc/glance/glance-registry.conf 
363:connection=mysql://glance:glance@192.168.1.17/glance
同步数据库
[root@linux-node0 ~]# su -s /bin/sh -c "glance-manage db_sync" glance
No handlers could be found for logger "oslo_config.cfg"
查看数据库同步是否成功
[root@linux-node0 ~]#  mysql -uglance -pglance -h 192.168.1.17
MariaDB [(none)]> use glance;
Database changed
MariaDB [glance]> show tables

创建glance用户
[root@linux-node0 ~]# source admin-openrc.sh 
[root@linux-node0 ~]# openstack user create --domain default --password=glance glance
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | default                          |
| enabled   | True                             |
| id        | 9009c0e0431646d193744d445411a0ab |
| name      | glance                           |
+-----------+----------------------------------+
将此用户加入到项目里面并给它赋予admin的权限
[root@linux-node0 ~]# openstack role add --project service --user glance admin

[root@linux-node0 ~]# vim  /etc/glance/glance-api.conf 
[root@linux-node0 ~]# grep -n ^[a-z]  /etc/glance/glance-api.conf 
363:verbose=True
491:notification_driver = noop
538:connection=mysql://glance:glance@192.168.1.17/glance
642:default_store=file
701:filesystem_store_datadir=/var/lib/glance/images/
974:auth_uri = http://192.168.1.17:5000
975:auth_url = http://192.168.1.17:35357
976:auth_plugin = password
977:project_domain_id = default
978:user_domain_id = default
979:project_name = service
980:username = glance
981:password = glance
1484:flavor= keystone

[root@linux-node0 ~]# grep -n '^[a-z]' /etc/glance/glance-registry.conf 
363:connection=mysql://glance:glance@192.168.1.17/glance
767:auth_uri = http://192.168.1.17:5000
768:auth_url = http://192.168.1.17:35357
769:auth_plugin = password
770:project_domain_id = default
771:user_domain_id = default
772:project_name = service
773:username = glance
774:password = glance
1256:flavor=keystone
启动glance服务并设置开机启动
[root@linux-node0 ~]# systemctl enable openstack-glance-api
[root@linux-node0 ~]# systemctl enable openstack-glance-registry
[root@linux-node0 ~]# systemctl start openstack-glance-api
[root@linux-node0 ~]# systemctl start openstack-glance-registry
监听端口: registry:9191     api:9292
[root@linux-node0 ~]# netstat -antup
  

[root@linux-node0 ~]# source admin-openrc.sh 
[root@linux-node0 ~]# 
openstack service create --name glance --description "OpenStack Image service" image
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Image service          |
| enabled     | True                             |
| id          | 5ab719816a7f4294a7f843950fcd2e59 |
| name        | glance                           |
| type        | image                            |
+-------------+----------------------------------+
openstack endpoint create --region RegionOne   image public http://192.168.1.17:9292
openstack endpoint create --region RegionOne   image internal http://192.168.1.17:9292
openstack endpoint create --region RegionOne   image admin http://192.168.1.17:9292
[root@linux-node0 ~]# 
openstack endpoint create --region RegionOne   image public http://192.168.1.17:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | a181ddd3ee8b4d72be1a0fda87b542ef |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 5ab719816a7f4294a7f843950fcd2e59 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://192.168.1.17:9292            |
+--------------+----------------------------------+
[root@linux-node0 ~]# openstack endpoint create --region RegionOne   image internal http://192.168.1.17:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 4df72061901c40efa3905e95674fc5bc |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 5ab719816a7f4294a7f843950fcd2e59 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://192.168.1.17:9292            |
+--------------+----------------------------------+
[root@linux-node0 ~]# openstack endpoint create --region RegionOne   image admin http://192.168.1.17:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | f755b7c22ab04ea3857840086b7c7754 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 5ab719816a7f4294a7f843950fcd2e59 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://192.168.1.17:9292            |
+--------------+----------------------------------+

环境变量添加export OS_IMAGE_API_VERSION=2
[root@linux-node0 ~]# cat admin-openrc.sh 
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=admin
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://192.168.1.17:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
[root@linux-node0 ~]# cat demo-openrc.sh 
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=demo
export OS_TENANT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=demo
export OS_AUTH_URL=http://192.168.1.17:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
[root@linux-node0 ~]# glance image-list

上传镜像
[root@linux-node0 ~]# glance image-create --name "cirros" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare  --visibility public --progress
[=============================>] 100%
+------------------+--------------------------------------+
| Property         | Value                                |
+------------------+--------------------------------------+
| checksum         | ee1eca47dc88f4879d8a229cc70a07c6     |
| container_format | bare                                 |
| created_at       | 2016-05-27T05:09:36Z                 |
| disk_format      | qcow2                                |
| id               | 07245ea1-5f76-453d-a320-f1b08433a10a |
| min_disk         | 0                                    |
| min_ram          | 0                                    |
| name             | cirros                               |
| owner            | b5a578cfdb4848dba2b91dd38d1e2b93     |
| protected        | False                                |
| size             | 13287936                             |
| status           | active                               |
| tags             | []                                   |
| updated_at       | 2016-05-27T05:09:36Z                 |
| virtual_size     | None                                 |
| visibility       | public                               |
+------------------+--------------------------------------+
查看镜像
[root@linux-node0 ~]# glance image-list
+--------------------------------------+--------+
| ID                                   | Name   |
+--------------------------------------+--------+
| 07245ea1-5f76-453d-a320-f1b08433a10a | cirros |
+--------------------------------------+--------+


配置nova.conf文件
1)、配置nova连接及数据表的创建
[root@linux-node0 ~]# grep -n ^[a-z] /etc/nova/nova.conf 
1740:connection=mysql://nova:nova@192.168.1.17/nova
同步数据库
[root@linux-node0 ~]# su -s /bin/sh -c "nova-manage db sync" nova
检查数据库
[root@linux-node0 ~]# mysql -unova -pnova -h 192.168.1.17
MariaDB [(none)]> use nova
Database changed
MariaDB [nova]> show tables;

2)、Keystone配置
[root@linux-node0 ~]# vim /etc/nova/nova.conf 
[root@linux-node0 ~]# grep -n ^[a-z] /etc/nova/nova.conf 
1420:rpc_backend=rabbit
1740:connection=mysql://nova:nova@192.168.1.17/nova
2922:rabbit_host=192.168.1.17
2926:rabbit_port=5672
2938:rabbit_userid=openstack
2942:rabbit_password=openstack
[root@linux-node0 ~]# source admin-openrc.sh 
[root@linux-node0 ~]# openstack user create --domain default --password=nova nova
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | default                          |
| enabled   | True                             |
| id        | 6b4986f51d7749fd8dc9668d92e21e01 |
| name      | nova                             |
+-----------+----------------------------------+
[root@linux-node0 ~]# openstack role add --project service --user nova admin
[root@linux-node0 nova]# grep -n ^[a-z] nova.conf 
61:rpc_backend=rabbit
124:my_ip=192.168.1.17
268:enabled_apis=osapi_compute,metadata
425:auth_strategy=keystone
1053:network_api_class=nova.network.neutronv2.api.API
1171:linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
1331:security_group_api=neutron
1760:firewall_driver = nova.virt.firewall.NoopFirewallDriver
1828:vncserver_listen=$my_ip
1832:vncserver_proxyclient_address=$my_ip
2213:connection=mysql://nova:nova@192.168.1.17/nova
2334:host=$my_ip
2542:auth_uri = http://192.168.1.17:5000
2543:auth_url = http://192.168.1.17:35357
2544:auth_plugin = password
2545:project_domain_id = default
2546:user_domain_id = default
2547:project_name = service
2548:username = nova
2549:password = nova
3033:url = http://192.168.1.17:9696
3034:auth_url = http://192.168.1.17:35357
3035:auth_plugin = password
3036:project_domain_id = default
3037:user_domain_id = default
3038:region_name = RegionOne
3039:project_name = service
3040:username = neutron
3041:password = neutron
3049:service_metadata_proxy=true
3053:metadata_proxy_shared_secret=neutron
3804:lock_path=/var/lib/nova/tmp
3967:rabbit_host=192.168.1.17
3971:rabbit_port=5672
3983:rabbit_userid=openstack
3987:rabbit_password=openstack
设置开机自启动
systemctl enable openstack-nova-api.service 
openstack-nova-cert.service openstack-nova-consoleauth.service 
openstack-nova-scheduler.service openstack-nova-conductor.service 
openstack-nova-novncproxy.service
启动全部服务
[root@linux-node0 ~]# systemctl start openstack-nova-api.service \
openstack-nova-cert.service openstack-nova-consoleauth.service \
openstack-nova-scheduler.service openstack-nova-conductor.service \
openstack-nova-novncproxy.service
注册服务
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://192.168.1.17:8774/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne compute internal http://192.168.1.17:8774/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne compute admin http://192.168.1.17:8774/v2/%\(tenant_id\)s
[root@linux-node0 ~]# source admin-openrc.sh 
[root@linux-node0 ~]# openstack service create --name nova --description "OpenStack Compute" compute
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Compute                |
| enabled     | True                             |
| id          | 47c979dc1312436fb912b8e8b842f293 |
| name        | nova                             |
| type        | compute                          |
+-------------+----------------------------------+
[root@linux-node0 ~]# openstack endpoint create --region RegionOne compute public http://192.168.1.17:8774/v2/%\(tenant_id\)s
+--------------+----------------------------------------+
| Field        | Value                                  |
+--------------+----------------------------------------+
| enabled      | True                                   |
| id           | b42b8696b4e84d0581228f8fef746ce2       |
| interface    | public                                 |
| region       | RegionOne                              |
| region_id    | RegionOne                              |
| service_id   | 47c979dc1312436fb912b8e8b842f293       |
| service_name | nova                                   |
| service_type | compute                                |
| url          | http://192.168.1.17:8774/v2/%(tenant_id)s |
+--------------+----------------------------------------+
[root@linux-node0 ~]# openstack endpoint create --region RegionOne compute internal http://192.168.1.17:8774/v2/%\(tenant_id\)s
+--------------+----------------------------------------+
| Field        | Value                                  |
+--------------+----------------------------------------+
| enabled      | True                                   |
| id           | b54df18a4c23471399858df476a98d5f       |
| interface    | internal                               |
| region       | RegionOne                              |
| region_id    | RegionOne                              |
| service_id   | 47c979dc1312436fb912b8e8b842f293       |
| service_name | nova                                   |
| service_type | compute                                |
| url          | http://192.168.1.17:8774/v2/%(tenant_id)s |
+--------------+----------------------------------------+
[root@linux-node0 ~]# openstack endpoint create --region RegionOne compute admin http://192.168.1.17:8774/v2/%\(tenant_id\)s
+--------------+----------------------------------------+
| Field        | Value                                  |
+--------------+----------------------------------------+
| enabled      | True                                   |
| id           | 71daf94628384f1e8315060f86542696       |
| interface    | admin                                  |
| region       | RegionOne                              |
| region_id    | RegionOne                              |
| service_id   | 47c979dc1312436fb912b8e8b842f293       |
| service_name | nova                                   |
| service_type | compute                                |
| url          | http://192.168.1.17:8774/v2/%(tenant_id)s |
+--------------+----------------------------------------+
验证是否成功:
[root@linux-node0 ~]# openstack host list
+-------------------------+-------------+----------+
| Host Name               | Service     | Zone     |
+-------------------------+-------------+----------+
| control-node0.xiegh.com | conductor   | internal |
| control-node0.xiegh.com | consoleauth | internal |
| control-node0.xiegh.com | scheduler   | internal |
| control-node0.xiegh.com | cert        | internal |
+-------------------------+-------------+----------+
如果出现此四个服务则代表nova创建成功了

nova-compute一般运行在计算节点上,通过message queue接收并管理VM的生命周期
nova-compute通过libvirt管理KVM,通过XenAPI管理Xen
[root@linux-node1 ~]# grep -n '^[a-z]' /etc/nova/nova.conf 
61:rpc_backend=rabbit
124:my_ip=10.0.0.81
268:enabled_apis=osapi_compute,metadata
425:auth_strategy=keystone
1053:network_api_class=nova.network.neutronv2.api.API
1171:linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
1331:security_group_api=neutron
1760:firewall_driver = nova.virt.firewall.NoopFirewallDriver
1820:novncproxy_base_url=http://192.168.1.17:6080/vnc_auto.html
1828:vncserver_listen=0.0.0.0
1832:vncserver_proxyclient_address=10.0.0.81
1835:vnc_enabled=true
1838:vnc_keymap=en-us
2213:connection=mysql://nova:nova@192.168.1.17/nova
2334:host=192.168.1.17
2542:auth_uri = http://192.168.1.17:5000
2543:auth_url = http://192.168.1.17:35357
2544:auth_plugin = password
2545:project_domain_id = default
2546:user_domain_id = default
2547:project_name = service
2548:username = nova
2549:password = nova
2727:virt_type=kvm
3033:url = http://192.168.1.17:9696
3034:auth_url = http://192.168.1.17:35357
3035:auth_plugin = password
3036:project_domain_id = default
3037:user_domain_id = default
3038:region_name = RegionOne
3039:project_name = service
3040:username = neutron
3041:password = neutron
3804:lock_path=/var/lib/nova/tmp
3967:rabbit_host=192.168.1.17
3971:rabbit_port=5672
3983:rabbit_userid=openstack
3987:rabbit_password=openstack

[root@linux-node1 ~]#  systemctl enable libvirtd openstack-nova-compute
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-compute.service to /usr/lib/systemd/system/openstack-nova-compute.service
[root@linux-node1 ~]# systemctl start libvirtd openstack-nova-compute
在控制节点上面查看注册状态
[root@linux-node0 ~]# openstack host list
+-------------------------+-------------+----------+
| Host Name               | Service     | Zone     |
+-------------------------+-------------+----------+
| control-node0.xiegh.com | conductor   | internal |
| control-node0.xiegh.com | consoleauth | internal |
| control-node0.xiegh.com | scheduler   | internal |
| control-node0.xiegh.com | cert        | internal |
| linux-node1.xiegh.com | compute     | nova     |
+-------------------------+-------------+----------+
计算节点上nova安装成功并注册成功
镜像出于活动的状态
[root@linux-node0 ~]# nova image-list
+--------------------------------------+--------+--------+--------+
| ID                                   | Name   | Status | Server |
+--------------------------------------+--------+--------+--------+
| 07245ea1-5f76-453d-a320-f1b08433a10a | cirros | ACTIVE |        |
+--------------------------------------+--------+--------+--------+
验证nova与keystone的连接,如下说明成功
[root@linux-node0 ~]# nova endpoints
WARNING: keystone has no endpoint in ! Available endpoints for this service:
+-----------+----------------------------------+
| keystone  | Value                            |
+-----------+----------------------------------+
| id        | 1a8eb7b97ff64c56886942a38054b9bb |
| interface | public                           |
| region    | RegionOne                        |
| region_id | RegionOne                        |
| url       | http://192.168.1.17:5000/v2.0       |
+-----------+----------------------------------+
+-----------+----------------------------------+
| keystone  | Value                            |
+-----------+----------------------------------+
| id        | 34c8185306c340a0bb4efbfc9da21003 |
| interface | admin                            |
| region    | RegionOne                        |
| region_id | RegionOne                        |
| url       | http://192.168.1.17:35357/v2.0      |
+-----------+----------------------------------+
+-----------+----------------------------------+
| keystone  | Value                            |
+-----------+----------------------------------+
| id        | 4caf182c26dd457ba86d9974dfb00c1b |
| interface | internal                         |
| region    | RegionOne                        |
| region_id | RegionOne                        |
| url       | http://192.168.1.17:5000/v2.0       |
+-----------+----------------------------------+
WARNING: glance has no endpoint in ! Available endpoints for this service:
+-----------+----------------------------------+
| glance    | Value                            |
+-----------+----------------------------------+
| id        | 4df72061901c40efa3905e95674fc5bc |
| interface | internal                         |
| region    | RegionOne                        |
| region_id | RegionOne                        |
| url       | http://192.168.1.17:9292            |
+-----------+----------------------------------+
+-----------+----------------------------------+
| glance    | Value                            |
+-----------+----------------------------------+
| id        | a181ddd3ee8b4d72be1a0fda87b542ef |
| interface | public                           |
| region    | RegionOne                        |
| region_id | RegionOne                        |
| url       | http://192.168.1.17:9292            |
+-----------+----------------------------------+
+-----------+----------------------------------+
| glance    | Value                            |
+-----------+----------------------------------+
| id        | f755b7c22ab04ea3857840086b7c7754 |
| interface | admin                            |
| region    | RegionOne                        |
| region_id | RegionOne                        |
| url       | http://192.168.1.17:9292            |
+-----------+----------------------------------+
WARNING: nova has no endpoint in ! Available endpoints for this service:
+-----------+-----------------------------------------------------------+
| nova      | Value                                                     |
+-----------+-----------------------------------------------------------+
| id        | 71daf94628384f1e8315060f86542696                          |
| interface | admin                                                     |
| region    | RegionOne                                                 |
| region_id | RegionOne                                                 |
| url       | http://192.168.1.17:8774/v2/b5a578cfdb4848dba2b91dd38d1e2b93 |
+-----------+-----------------------------------------------------------+
+-----------+-----------------------------------------------------------+
| nova      | Value                                                     |
+-----------+-----------------------------------------------------------+
| id        | b42b8696b4e84d0581228f8fef746ce2                          |
| interface | public                                                    |
| region    | RegionOne                                                 |
| region_id | RegionOne                                                 |
| url       | http://192.168.1.17:8774/v2/b5a578cfdb4848dba2b91dd38d1e2b93 |
+-----------+-----------------------------------------------------------+
+-----------+-----------------------------------------------------------+
| nova      | Value                                                     |
+-----------+-----------------------------------------------------------+
| id        | b54df18a4c23471399858df476a98d5f                          |
| interface | internal                                                  |
| region    | RegionOne                                                 |
| region_id | RegionOne                                                 |
| url       | http://192.168.1.17:8774/v2/b5a578cfdb4848dba2b91dd38d1e2b93 |
+-----------+-----------------------------------------------------------+
Neutron部署
注册网络服务:
source admin-openrc.sh 
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://192.168.1.17:9696
openstack endpoint create --region RegionOne network internal http://192.168.1.17:9696
openstack endpoint create --region RegionOne network admin http://192.168.1.17:9696
[root@linux-node0 ~]# openstack service create --name neutron --description "OpenStack Networking" network
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Networking             |
| enabled     | True                             |
| id          | eb5f03d85c774f48940654811a22b581 |
| name        | neutron                          |
| type        | network                          |
+-------------+----------------------------------+
[root@linux-node0 ~]# openstack endpoint create --region RegionOne network public http://192.168.1.17:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | f782d738018a4dc5b80931f67f31d974 |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | eb5f03d85c774f48940654811a22b581 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://192.168.1.17:9696            |
+--------------+----------------------------------+
[root@linux-node0 ~]# openstack endpoint create --region RegionOne network internal http://192.168.1.17:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 21565236fb1b4bc8b0c37c040369d7d4 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | eb5f03d85c774f48940654811a22b581 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://192.168.1.17:9696            |
+--------------+----------------------------------+
[root@linux-node0 ~]# openstack endpoint create --region RegionOne network admin http://192.168.1.17:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | f2c83846242d4443a7cd3f205cf3bb56 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | eb5f03d85c774f48940654811a22b581 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://192.168.1.17:9696            |
+--------------+----------------------------------+
[root@linux-node0 ~]# grep -n '^[a-z]' /etc/neutron/neutron.conf 
20:state_path = /var/lib/neutron
60:core_plugin = ml2
77:service_plugins = router
92:auth_strategy = keystone
360:notify_nova_on_port_status_changes = True
364:notify_nova_on_port_data_changes = True
367:nova_url = http://192.168.1.17:8774/v2
573:rpc_backend=rabbit
717:auth_uri = http://192.168.1.17:5000
718:auth_url = http://192.168.1.17:35357
719:auth_plugin = password
720:project_domain_id = default
721:user_domain_id = default
722:project_name = service
723:username = neutron
724:password = neutron
737:connection = mysql://neutron:neutron@192.168.1.17:3306/neutron
780:auth_url = http://192.168.1.17:35357
781:auth_plugin = password
782:project_domain_id = default
783:user_domain_id = default
784:region_name = RegionOne
785:project_name = service
786:username = nova
787:password = nova
818:lock_path = $state_path/lock
998:rabbit_host = 192.168.1.17
1002:rabbit_port = 5672
1014:rabbit_userid = openstack
1018:rabbit_password = openstack
[root@linux-node0 ~]# grep -n '^[a-z]' /etc/neutron/plugins/ml2/ml2_conf.ini
5:type_drivers = flat,vlan,gre,vxlan,geneve
12:tenant_network_types = vlan,gre,vxlan,geneve
18:mechanism_drivers = openvswitch,linuxbridge
27:extension_drivers = port_security
67:flat_networks = physnet1
120:enable_ipset = True
[root@linux-node0 ~]# grep -n '^[a-z]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
9:physical_interface_mappings = physnet1:eth0
16:enable_vxlan = false
51:prevent_arp_spoofing = True
57:firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
61:enable_security_group = True
[root@linux-node0 ~]# grep -n '^[a-z]' /etc/neutron/dhcp_agent.ini
27:interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
31:dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
52:enable_isolated_metadata = true
[root@linux-node0 ~]# grep -n '^[a-z]' /etc/neutron/metadata_agent.ini
4:auth_uri = http://192.168.1.17:5000
5:auth_url = http://192.168.1.17:35357
6:auth_region = RegionOne
7:auth_plugin = password
8:project_domain_id = default
9:user_domain_id = default
10:project_name = service
11:username = neutron
12:password = neutron
29:nova_metadata_ip = 192.168.1.17
52:metadata_proxy_shared_secret = neutron
[root@linux-node0 ~]# grep -n '^[a-z]' /etc/nova/nova.conf 
61:rpc_backend=rabbit
124:my_ip=192.168.1.17
268:enabled_apis=osapi_compute,metadata
425:auth_strategy=keystone
1053:network_api_class=nova.network.neutronv2.api.API
1171:linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
1331:security_group_api=neutron
1760:firewall_driver = nova.virt.firewall.NoopFirewallDriver
1828:vncserver_listen=$my_ip
1832:vncserver_proxyclient_address=$my_ip
2213:connection=mysql://nova:nova@192.168.1.17/nova
2334:host=$my_ip
2542:auth_uri = http://192.168.1.17:5000
2543:auth_url = http://192.168.1.17:35357
2544:auth_plugin = password
2545:project_domain_id = default
2546:user_domain_id = default
2547:project_name = service
2548:username = nova
2549:password = nova
3033:url = http://192.168.1.17:9696
3034:auth_url = http://192.168.1.17:35357
3035:auth_plugin = password
3036:project_domain_id = default
3037:user_domain_id = default
3038:region_name = RegionOne
3039:project_name = service
3040:username = neutron
3041:password = neutron
3049:service_metadata_proxy=true
3053:metadata_proxy_shared_secret=neutron
3804:lock_path=/var/lib/nova/tmp
3967:rabbit_host=192.168.1.17
3971:rabbit_port=5672
3983:rabbit_userid=openstack
3987:rabbit_password=openstack
[root@linux-node0 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
[root@linux-node0 ~]# openstack user create --domain default --password=neutron neutron
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | default                          |
| enabled   | True                             |
| id        | 85c411a092354b29b58c7505a8905824 |
| name      | neutron                          |
+-----------+----------------------------------+
[root@linux-node0 ~]# openstack role add --project service --user neutron admin

更新数据库
[root@linux-node0 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

重新启动下服务:
[root@linux-node0 ~]# systemctl restart openstack-nova-api
开机自动加载neutron及启动neutron服务
systemctl enable neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
systemctl restart neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
执行结果:
[root@linux-node0 ~]# systemctl restart openstack-nova-api
[root@linux-node0 ~]# systemctl enable neutron-server.service \
> neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
> neutron-metadata-agent.service
ln -s '/usr/lib/systemd/system/neutron-server.service' '/etc/systemd/system/multi-user.target.wants/neutron-server.service'
ln -s '/usr/lib/systemd/system/neutron-linuxbridge-agent.service' '/etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service'
ln -s '/usr/lib/systemd/system/neutron-dhcp-agent.service' '/etc/systemd/system/multi-user.target.wants/neutron-dhcp-agent.service'
ln -s '/usr/lib/systemd/system/neutron-metadata-agent.service' '/etc/systemd/system/multi-user.target.wants/neutron-metadata-agent.service'
[root@linux-node0 ~]# systemctl restart neutron-server.service \
> neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
> neutron-metadata-agent.service
查看网卡的配置
[root@linux-node0 ~]# source admin-openrc.sh 
[root@linux-node0 ~]# neutron agent-list 
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host                    | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
| 4de08ae7-5699-47ea-986b-7c855d7eb7bd | Linux bridge agent | control-node0.xiegh.com | :-)   | True           | neutron-linuxbridge-agent |
| adf5abfc-2a74-4baa-b4cd-da7f7f05a378 | Metadata agent     | control-node0.xiegh.com | :-)   | True           | neutron-metadata-agent    |
| c1562203-c8ff-4189-a59b-bcf480ca70c1 | DHCP agent         | control-node0.xiegh.com | :-)   | True           | neutron-dhcp-agent        |
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
将控制节点的配置文件neutron.conf 拷贝到计算节点的目录/etc/neutron/
[root@linux-node0 ~]# scp -r /etc/neutron/neutron.conf 10.0.0.81:/etc/neutron/
[root@linux-node0 ~]# scp -r /etc/neutron/plugins/ml2/linuxbridge_agent.ini 10.0.0.81:/etc/neutron/plugins/ml2/
[root@linux-node0 ~]# scp -r /etc/neutron/plugins/ml2/ml2_conf.ini 10.0.0.81:/etc/neutron/plugins/ml2/
在已经拷贝了,这里就不拷贝了nova.conf 
[root@linux-node1 ~]# grep -n '^[a-z]'  /etc/neutron/neutron.conf
20:state_path = /var/lib/neutron
60:core_plugin = ml2
77:service_plugins = router
92:auth_strategy = keystone
360:notify_nova_on_port_status_changes = True
364:notify_nova_on_port_data_changes = True
367:nova_url = http://192.168.1.17:8774/v2
573:rpc_backend=rabbit
717:auth_uri = http://192.168.1.17:5000
718:auth_url = http://192.168.1.17:35357
719:auth_plugin = password
720:project_domain_id = default
721:user_domain_id = default
722:project_name = service
723:username = neutron
724:password = neutron
737:connection = mysql://neutron:neutron@192.168.1.17:3306/neutron
780:auth_url = http://192.168.1.17:35357
781:auth_plugin = password
782:project_domain_id = default
783:user_domain_id = default
784:region_name = RegionOne
785:project_name = service
786:username = nova
787:password = nova
818:lock_path = $state_path/lock
998:rabbit_host = 192.168.1.17
1002:rabbit_port = 5672
1014:rabbit_userid = openstack
1018:rabbit_password = openstack

[root@linux-node1 ~]# grep -n '^[a-z]'  /etc/neutron/plugins/ml2/linuxbridge_agent.ini
9:physical_interface_mappings = physnet1:eth0
16:enable_vxlan = false
51:prevent_arp_spoofing = True
57:firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
61:enable_security_group = True
[root@linux-node1 ~]# grep -n '^[a-z]'  /etc/neutron/plugins/ml2/ml2_conf.ini
5:type_drivers = flat,vlan,gre,vxlan,geneve
12:tenant_network_types = vlan,gre,vxlan,geneve
18:mechanism_drivers = openvswitch,linuxbridge
27:extension_drivers = port_security
67:flat_networks = physnet1
120:enable_ipset = True

[root@linux-node1 ~]# grep -n '^[a-z]'  /etc/nova/nova.conf 
61:rpc_backend=rabbit
124:my_ip=10.0.0.81
268:enabled_apis=osapi_compute,metadata
425:auth_strategy=keystone
1053:network_api_class=nova.network.neutronv2.api.API
1171:linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
1331:security_group_api=neutron
1760:firewall_driver = nova.virt.firewall.NoopFirewallDriver
1820:novncproxy_base_url=http://192.168.1.17:6080/vnc_auto.html
1828:vncserver_listen=0.0.0.0
1832:vncserver_proxyclient_address=10.0.0.81
1835:vnc_enabled=true
1838:vnc_keymap=en-us
2213:connection=mysql://nova:nova@192.168.1.17/nova
2334:host=192.168.1.17
2542:auth_uri = http://192.168.1.17:5000
2543:auth_url = http://192.168.1.17:35357
2544:auth_plugin = password
2545:project_domain_id = default
2546:user_domain_id = default
2547:project_name = service
2548:username = nova
2549:password = nova
2727:virt_type=kvm
3033:url = http://192.168.1.17:9696
3034:auth_url = http://192.168.1.17:35357
3035:auth_plugin = password
3036:project_domain_id = default
3037:user_domain_id = default
3038:region_name = RegionOne
3039:project_name = service
3040:username = neutron
3041:password = neutron
3804:lock_path=/var/lib/nova/tmp
3967:rabbit_host=192.168.1.17
3971:rabbit_port=5672
3983:rabbit_userid=openstack
3987:rabbit_password=openstack

[root@linux-node1 ~]# systemctl restart openstack-nova-compute
[root@linux-node1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
[root@linux-node1 ~]# systemctl enable neutron-linuxbridge-agent.service
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service to /usr/lib/systemd/system/neutron-linuxbridge-agent.service.
[root@linux-node1 ~]# systemctl restart neutron-linuxbridge-agent.service
故障:
在控制节点上不能发现计算节点的neutron-linuxbridge-agent
重启计算节点后恢复正常
[root@linux-node0 ~]#  neutron agent-list
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host                    | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
| 4de08ae7-5699-47ea-986b-7c855d7eb7bd | Linux bridge agent | control-node0.xiegh.com | :-)   | True           | neutron-linuxbridge-agent |
| adf5abfc-2a74-4baa-b4cd-da7f7f05a378 | Metadata agent     | control-node0.xiegh.com | :-)   | True           | neutron-metadata-agent    |
| c1562203-c8ff-4189-a59b-bcf480ca70c1 | DHCP agent         | control-node0.xiegh.com | :-)   | True           | neutron-dhcp-agent        |
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
在控制节点查看:
[root@linux-node0 ~]# neutron agent-list
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host                    | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+-------------------------+-------+----------------+---------------------------+
| 4de08ae7-5699-47ea-986b-7c855d7eb7bd | Linux bridge agent | control-node0.xiegh.com | :-)   | True           | neutron-linuxbridge-agent |
| a7b2c76e-2c9e-42a3-89ac-725716a0c370 | Linux bridge agent | linux-node1.xiegh.com | :-)   | True           | neutron-linuxbridge-agent |
| adf5abfc-2a74-4baa-b4cd-da7f7f05a378 | Metadata agent     | control-node0.xiegh.com | :-)   | True           | neutron-metadata-agent    |
| c1562203-c8ff-4189-a59b-bcf480ca70c1 | DHCP agent         | control-node0.xiegh.com | :-)   | True           | neutron-dhcp-agent        |
+--------------------------------------+--------------------+-------------------------+-------+------
代表计算节点的Linux bridge agent已成功连接到控制节点。
创建一个网络:
neutron net-create flat --shared --provider:physical_network physnet1 --provider:network_type flat
[root@linux-node0 ~]# neutron net-create flat --shared --provider:physical_network physnet1 --provider:network_type flat
Created a new network:
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | True                                 |
| id                        | 516b5a4d-7fa5-43ae-8328-965c5e0e21d7 |
| mtu                       | 0                                    |
| name                      | flat                                 |
| port_security_enabled     | True                                 |
| provider:network_type     | flat                                 |
| provider:physical_network | physnet1                             |
| provider:segmentation_id  |                                      |
| router:external           | False                                |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   |                                      |
| tenant_id                 | b5a578cfdb4848dba2b91dd38d1e2b93     |
+---------------------------+--------------------------------------+
创建一个子网
neutron subnet-create flat 10.0.0.0/24 --name flat-subnet --allocation-pool start=10.0.0.100,end=10.0.0.200 --dns-nameserver 10.0.0.2 --gateway 10.0.0.2
[root@linux-node0 ~]# neutron subnet-create flat 10.0.0.0/24 --name flat-subnet --allocation-pool start=10.0.0.100,end=10.0.0.200 --dns-nameserver 10.0.0.2 --gateway 10.0.0.2
Created a new subnet:
+-------------------+----------------------------------------------+
| Field             | Value                                        |
+-------------------+----------------------------------------------+
| allocation_pools  | {"start": "10.0.0.100", "end": "10.0.0.200"} |
| cidr              | 10.0.0.0/24                                  |
| dns_nameservers   | 10.0.0.2                                     |
| enable_dhcp       | True                                         |
| gateway_ip        | 10.0.0.2                                     |
| host_routes       |                                              |
| id                | 64ba9f36-3e3e-4988-a863-876759ad43c3         |
| ip_version        | 4                                            |
| ipv6_address_mode |                                              |
| ipv6_ra_mode      |                                              |
| name              | flat-subnet                                  |
| network_id        | 516b5a4d-7fa5-43ae-8328-965c5e0e21d7         |
| subnetpool_id     |                                              |
| tenant_id         | b5a578cfdb4848dba2b91dd38d1e2b93             |
+-------------------+----------------------------------------------+
查看网络和子网
[root@linux-node0 ~]# neutron subnet-list 
+--------------------------------------+-------------+-------------+---------------------------------
| id                                   | name        | cidr        | allocation_pools                             |
+--------------------------------------+-------------+-------------+---------------------------------
| 64ba9f36-3e3e-4988-a863-876759ad43c3 | flat-subnet | 10.0.0.0/24 | {"start": "10.0.0.100", "end": "10.0.0.200"} |
+--------------------------------------+-------------+-------------+---------------------------------
[root@linux-node0 ~]#  source demo-openrc.sh 
[root@linux-node0 ~]# ssh-keygen -q -N ""
Enter file in which to save the key (/root/.ssh/id_rsa): 
[root@linux-node0 ~]# ls .ssh/
id_rsa  id_rsa.pub  known_hosts
[root@linux-node0 ~]# nova keypair-add --pub-key .ssh/id_rsa.pub mykey
[root@linux-node0 ~]# nova keypair-list
+-------+-------------------------------------------------+
| Name  | Fingerprint                                     |
+-------+-------------------------------------------------+
| mykey | ce:ad:3c:51:2a:db:dc:4c:d1:a5:22:e6:20:53:cf:65 |
+-------+-------------------------------------------------+
[root@linux-node0 ~]# nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
+-------------+-----------+---------+-----------+--------------+
| IP Protocol | From Port | To Port | IP Range  | Source Group |
+-------------+-----------+---------+-----------+--------------+
| icmp        | -1        | -1      | 0.0.0.0/0 |              |
+-------------+-----------+---------+-----------+--------------+
[root@linux-node0 ~]# nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
+-------------+-----------+---------+-----------+--------------+
| IP Protocol | From Port | To Port | IP Range  | Source Group |
+-------------+-----------+---------+-----------+--------------+
| tcp         | 22        | 22      | 0.0.0.0/0 |              |
+-------------+-----------+---------+-----------+--------------+
[root@linux-node0 ~]# nova flavor-list
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| ID | Name      | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| 1  | m1.tiny   | 512       | 1    | 0         |      | 1     | 1.0         | True      |
| 2  | m1.small  | 2048      | 20   | 0         |      | 1     | 1.0         | True      |
| 3  | m1.medium | 4096      | 40   | 0         |      | 2     | 1.0         | True      |
| 4  | m1.large  | 8192      | 80   | 0         |      | 4     | 1.0         | True      |
| 5  | m1.xlarge | 16384     | 160  | 0         |      | 8     | 1.0         | True      |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
[root@linux-node0 ~]# nova image-list
+--------------------------------------+--------+--------+--------+
| ID                                   | Name   | Status | Server |
+--------------------------------------+--------+--------+--------+
| 07245ea1-5f76-453d-a320-f1b08433a10a | cirros | ACTIVE |        |
+--------------------------------------+--------+--------+--------+
[root@linux-node0 ~]# neutron net-list
+--------------------------------------+------+--------------------------------------------------+
| id                                   | name | subnets                                          |
+--------------------------------------+------+--------------------------------------------------+
| 516b5a4d-7fa5-43ae-8328-965c5e0e21d7 | flat | 64ba9f36-3e3e-4988-a863-876759ad43c3 10.0.0.0/24 |
+--------------------------------------+------+--------------------------------------------------+
[root@linux-node0 ~]# nova secgroup-list
+--------------------------------------+---------+------------------------+
| Id                                   | Name    | Description            |
+--------------------------------------+---------+------------------------+
| ba83d14c-2516-427b-8e88-89a49270b8d7 | default | Default security group |
+--------------------------------------+---------+------------------------+
nova boot --flavor m1.tiny --image cirros --nic net-id=516b5a4d-7fa5-43ae-8328-965c5e0e21d7 --security-group default --key-name mykey hehe-instance
您可能感兴趣的文档:

--结束END--

本文标题: 私有云 openstack部署

本文链接: https://lsjlt.com/news/43622.html(转载时请注明来源链接)

有问题或投稿请发送至: 邮箱/279061341@qq.com    QQ/279061341

猜你喜欢
  • 私有云 openstack部署
                     &...
    99+
    2024-04-02
  • 钉钉私有云部署
    一、钉钉私有云部署 在企业数字化转型的过程中,私有云已经成为了不可或缺的一部分。在钉钉的帮助下,企业可以通过私有云的部署,更加专注于自身业务的发展,并为客户提供更加个性化的服务。具体来说,私有云的部署可以分为以下几个步骤: 1.选择适合的...
    99+
    2023-10-28
  • RHEL7部署私有云网盘
    OwnCloud 一款文件主机服务软件,就是我们平时使用的云存储,不过这是在自己主机的服务器上建立属于自己的私有云,OwnCloud 使用AGPLv3协议发布。本项目是基于PHP和SQLite,MySQL,Oracle或PostgreSQL...
    99+
    2023-06-05
  • 私有云服务器部署
    私有云服务器部署通常涉及将数据中心的资源和服务部署到云端。在这种情况下,部署私有云服务器的过程相对简单,可以在几个步骤下完成。 以下是几个步骤: 选择云服务提供商:选择一家可以提供云计算服务的提供商,例如AWS、Amazon Web S...
    99+
    2023-10-26
    服务器
  • 钉钉 私有部署
    私有部署意味着钉钉只能在自己的服务器上进行开发和部署,从而保证了企业数据的安全性和隐私性。这种私有部署模式使得钉钉的开发者和使用者之间的关系更加紧密,可以更好地保证企业数据的安全和稳定。 另外,钉钉的私有部署模式也可以提高钉钉的安全性。由...
    99+
    2023-10-28
  • 阿里私有云代理部署
    确定您的业务需求:您需要考虑您的应用程序需要什么样的云环境,如云服务器、存储、网络、安全等。 选择适合您需求的私有云代理:阿里云提供多种不同的私有云代理,可以满足您的需求。 配置和安装:您需要根据您的需求配置和安装私有云代理,并设置其访问...
    99+
    2023-10-27
    阿里
  • 云服务器私有化部署
    云服务器私有化部署通常需要采取私有云或者公有云的方式进行部署。私有云是指在云服务器的基础架构中不包括存储和数据服务器,只提供基础资源,客户可以通过租用或购买的方式将其部署到云服务器中。 私有云可以通过使用公有云服务提供商提供的私有云管理平...
    99+
    2023-10-27
    服务器
  • openstack 一键部署
    OpenStack是一个开源的云计算平台,用于构建和管理公有云和私有云。在部署OpenStack时,可以使用一键部署工具来简化部署过...
    99+
    2023-10-11
    openstack
  • CentOS 7部署OpenStack(
    1、创建数据库[root@controller ~]# mysql -u root -p -e "CREATE DATABASEglance;"Enter password: [root@controller ~]# mysql -u ro...
    99+
    2023-01-31
    CentOS OpenStack
  • 钉钉 私有化部署
    阿里巴巴是一家以电商、云计算和数字媒体为主要业务的公司,旗下拥有多个子公司,包括淘宝、天猫、阿里云、菜鸟等。这些子公司的发展历程可以追溯到2008年,当时淘宝成立并推出了“网购一站式服务”的概念,旨在为用户提供全方位的网购服务。 2015...
    99+
    2023-10-28
  • 钉钉私有化部署
    私有化钉钉并入阿里云事业群是阿里巴巴集团的一次重大举措。私有化钉钉是阿里巴巴集团为优化资源配置,加强对旗下子公司的管理而做出的决策。私有化钉钉可以更好地实现集团内部的资源共享,同时也能更好地管理旗下子公司,提高企业运营效率。 在私有化钉钉...
    99+
    2023-10-28
  • 私有云服务器部署流程
    私有云服务器部署是指在云端将您的数据、应用程序和服务部署到互联网上的服务。以下是一个基本的云服务器部署流程的描述: 选择云计算平台 在选择云计算平台时,您可以考虑几个因素,例如可用性、可靠性、安全性和性能。可以通过访问云计算平台官方网...
    99+
    2023-10-26
    流程 服务器
  • 私有云服务器部署失败
    私有云服务器部署失败是一种常见的故障,可能是由于以下原因之一: 配置错误:配置错误导致服务器无法正常启动,可能会影响服务的运行。 网络连接问题:如果云服务器位于公共互联网上,网络连接可能会出现问题,导致服务无法正常运行。 安全漏洞:某些...
    99+
    2023-10-26
    服务器
  • openstack M版本部署
    系统解决方案 一、环境需求1、网卡em1em2em3em4controller1172.16.16.1172.16.17.1nonenonecontroller1172.16.16.2172.16...
    99+
    2024-04-02
  • packstack一键部署openstack
    Packstack是一个用于快速部署OpenStack的一键部署工具。它使用了puppet模块来自动化安装和配置OpenStack的...
    99+
    2023-10-11
    openstack
  • 云主机上怎么部署OpenStack
    在云主机上部署OpenStack可以按照以下步骤进行:1. 创建云主机:首先需要创建一个云主机实例,确保该云主机满足OpenStac...
    99+
    2023-09-17
    openstack 云主机
  • 如何部署私有化KooTeam
    这篇文章主要介绍了如何部署私有化KooTeam的相关知识,内容详细易懂,操作简单快捷,具有一定借鉴价值,相信大家阅读完这篇如何部署私有化KooTeam文章都会有所收获,下面我们一起来看看吧。KooTeam是一个社会化的团队协作项目管理在线系...
    99+
    2023-06-27
  • kooteam私有化如何部署
    这篇文章主要介绍“kooteam私有化如何部署”的相关知识,小编通过实际案例向大家展示操作过程,操作方法简单快捷,实用性强,希望这篇“kooteam私有化如何部署”文章能帮助大家解决问题。kooteam是一款轻量级的在线团队协作工具,提供各...
    99+
    2023-06-27
  • 钉钉可以私有部署
    首先,私有部署可以为企业带来更高的安全性和自主性。私有部署可以将企业的应用程序和数据存储在一个独立的、受保护的服务器上,以保护企业的核心数据和信息。这样可以大大降低企业遭受网络攻击和数据泄露的风险,同时也提高了企业的自主性和独立性。 其次...
    99+
    2023-10-28
  • 云服务器私有化部署方案
    一、云服务器私有化部署的优势 资源共享 私有化部署可以将一台云服务器私有化,从而使得其他用户可以使用这台服务器的全部资源,包括计算、存储、网络等,从而实现资源的共享。此外,云服务器还可以根据用户需求进行弹性扩展,从而满足不同用户的需求...
    99+
    2023-10-27
    服务器 方案
软考高级职称资格查询
编程网,编程工程师的家园,是目前国内优秀的开源技术社区之一,形成了由开源软件库、代码分享、资讯、协作翻译、讨论区和博客等几大频道内容,为IT开发者提供了一个发现、使用、并交流开源技术的平台。
  • 官方手机版

  • 微信公众号

  • 商务合作