RDO TripleO QuickStart && First impressions

May 27, 2016

I believe the post below sheds some more light on the TripleO QuickStart procedure suggested on the RDO QuickStart page (32 GB of RAM on the virtualization host is a must; even the minimal configuration requires about 23 GB of RAM at runtime). Following tips from Deploying OpenStack on just one hosted server.
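
For reference, a minimal sketch of the QuickStart invocation on the virtualization host, roughly as the RDO QuickStart page describes it at the time of writing (treat the script URL and options as an illustration rather than the authoritative procedure; they may have changed since, and the VIRTHOST value below is a hypothetical address):

$ export VIRTHOST=192.168.23.8        # hypothetical address of the virtualization host
$ curl -O https://raw.githubusercontent.com/openstack/tripleo-quickstart/master/quickstart.sh
$ bash quickstart.sh $VIRTHOST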

Overcloud deployed.

************************************************************************
First of all, let's take a look at the interfaces and routing table on the undercloud VM
************************************************************************

[root@undercloud ~]# ifconfig

br-ctlplane: flags=4163  mtu 1500
inet 192.0.2.1  netmask 255.255.255.0  broadcast 192.0.2.255

inet6 fe80::285:8cff:feee:4c12  prefixlen 64  scopeid 0x20
ether 00:85:8c:ee:4c:12  txqueuelen 0  (Ethernet)
RX packets 5458173  bytes 430801023 (410.8 MiB)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 8562456  bytes 31493865046 (29.3 GiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth0: flags=4163  mtu 1500
inet 192.168.23.28  netmask 255.255.255.0  broadcast 192.168.23.255
inet6 fe80::285:8cff:feee:4c10  prefixlen 64  scopeid 0x20
ether 00:85:8c:ee:4c:10  txqueuelen 1000  (Ethernet)
RX packets 4550861  bytes 7090076105 (6.6 GiB)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 1597556  bytes 760511620 (725.2 MiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth1: flags=4163  mtu 1500
inet6 fe80::285:8cff:feee:4c12  prefixlen 64  scopeid 0x20
ether 00:85:8c:ee:4c:12  txqueuelen 1000  (Ethernet)
RX packets 5459780  bytes 430920997 (410.9 MiB)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 8564443  bytes 31494029129 (29.3 GiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73  mtu 65536
inet 127.0.0.1  netmask 255.0.0.0
inet6 ::1  prefixlen 128  scopeid 0x10
loop  txqueuelen 0  (Local Loopback)
RX packets 4361647  bytes 24858373851 (23.1 GiB)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 4361647  bytes 24858373851 (23.1 GiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

virbr0: flags=4099  mtu 1500
inet 192.168.122.1  netmask 255.255.255.0  broadcast 192.168.122.255
ether 52:54:00:39:0a:ae  txqueuelen 0  (Ethernet)
RX packets 0  bytes 0 (0.0 B)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 0  bytes 0 (0.0 B)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

vlan10: flags=4163  mtu 1500
inet 10.0.0.1  netmask 255.255.255.0  broadcast 10.0.0.255
inet6 fe80::804e:69ff:fe19:844b  prefixlen 64  scopeid 0x20
ether 82:4e:69:19:84:4b  txqueuelen 0  (Ethernet)
RX packets 0  bytes 0 (0.0 B)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 12  bytes 816 (816.0 B)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@undercloud ~]# ip route
default via 192.168.23.1 dev eth0
10.0.0.0/24 dev vlan10  proto kernel  scope link  src 10.0.0.1
192.0.2.0/24 dev br-ctlplane  proto kernel  scope link  src 192.0.2.1
192.168.23.0/24 dev eth0  proto kernel  scope link  src 192.168.23.28
192.168.122.0/24 dev virbr0  proto kernel  scope link  src 192.168.122.1

[root@undercloud ~]# ovs-vsctl show
83b044ee-44ac-4575-88b3-4951a6e9847f
    Bridge br-int
        fail_mode: secure
        Port "tapb3ad6627-29"
            tag: 1
            Interface "tapb3ad6627-29"
                type: internal
        Port int-br-ctlplane
            Interface int-br-ctlplane
                type: patch
                options: {peer=phy-br-ctlplane}
        Port br-int
            Interface br-int
                type: internal
    Bridge br-ctlplane
        Port "vlan10"
            tag: 10
            Interface "vlan10"
                type: internal
        Port phy-br-ctlplane
            Interface phy-br-ctlplane
                type: patch
                options: {peer=int-br-ctlplane}
        Port "eth1"
            Interface "eth1"
        Port br-ctlplane
            Interface br-ctlplane
                type: internal
    ovs_version: "2.5.0"

*********************************************************
Here are the admin credentials for the overcloud
*********************************************************

[stack@undercloud ~]$ cat overcloudrc
export OS_NO_CACHE=True
export OS_CLOUDNAME=overcloud
export OS_AUTH_URL=http://192.0.2.10:5000/v2.0
export NOVA_VERSION=1.1
export COMPUTE_API_VERSION=1.1
export OS_USERNAME=admin
export no_proxy=,192.0.2.10,192.0.2.10
export OS_PASSWORD=pWyQpHsaXAWskcmYEq2ja4WaU
export PYTHONWARNINGS="ignore:Certificate has no, ignore:A true SSLContext object is not available"
export OS_TENANT_NAME=admin
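
Once these credentials are sourced, the standard clients talk to the overcloud Keystone instead of the undercloud one; any of the usual commands will do as a quick check, for instance:

[stack@undercloud ~]$ source overcloudrc
[stack@undercloud ~]$ nova service-list
[stack@undercloud ~]$ neutron net-list
[stack@undercloud ~]$ openstack endpoint list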

*******************************
At the same time on VIRTHOST
*******************************

[root@ServerCentOS72 ~]# virsh net-list

Name                 State      Autostart     Persistent
———————————————————-
default              active     yes           yes
external             active     yes           yes
overcloud            active     yes           yes

[root@ServerCentOS72 ~]# virsh net-dumpxml external

<network>
  <name>external</name>
  <uuid>d585615b-c1c5-4e30-bf2d-ea247591c2b0</uuid>
  <forward mode='nat'>
    <nat>
      <port start='1024' end='65535'/>
    </nat>
  </forward>
  <bridge name='brext' stp='off' delay='0'/>
  <mac address='52:54:00:9d:b4:1d'/>
  <ip address='192.168.23.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.23.10' end='192.168.23.50'/>
    </dhcp>
  </ip>
</network>
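
tripleo-quickstart creates this network on the VIRTHOST by itself; purely for illustration, a NAT network like this could be (re)defined by hand with libvirt, assuming the XML above were saved as external.xml:

# virsh net-define external.xml
# virsh net-start external
# virsh net-autostart external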

[root@ServerCentOS72 ~]# su – stack

Last login: Thu May 26 18:01:31 MSK 2016 on :0

[stack@ServerCentOS72 ~]$ virsh list
Id    Name                           State

—————————————————-
2     undercloud                     running
11    compute_0                      running
12    control_0                      running

*************************************************************************
Source stackrc and run openstack-status on the undercloud.
The overcloud deployment has already completed on the undercloud VM.
*************************************************************************

[root@undercloud ~]# . stackrc
[root@undercloud ~]# openstack-status

== Nova services ==

openstack-nova-api:                     active
openstack-nova-compute:                 active
openstack-nova-network:                 inactive  (disabled on boot)
openstack-nova-scheduler:               active
openstack-nova-cert:                    active
openstack-nova-conductor:               active
openstack-nova-console:                 inactive  (disabled on boot)
openstack-nova-consoleauth:             inactive  (disabled on boot)
openstack-nova-xvpvncproxy:             inactive  (disabled on boot)

== Glance services ==

openstack-glance-api:                   active
openstack-glance-registry:              active
== Keystone service ==

openstack-keystone:                     inactive  (disabled on boot)

== Horizon service ==
openstack-dashboard:                    404
== neutron services ==

neutron-server:                         active
neutron-dhcp-agent:                     active
neutron-l3-agent:                       inactive  (disabled on boot)
neutron-metadata-agent:                 inactive  (disabled on boot)
neutron-lbaas-agent:                    inactive  (disabled on boot)
neutron-openvswitch-agent:              active
neutron-metering-agent:                 inactive  (disabled on boot)

== Swift services ==

openstack-swift-proxy:                  active
openstack-swift-account:                active
openstack-swift-container:              active
openstack-swift-object:                 active

== Cinder services ==

openstack-cinder-api:                   inactive  (disabled on boot)
openstack-cinder-scheduler:             inactive  (disabled on boot)
openstack-cinder-volume:                inactive  (disabled on boot)
openstack-cinder-backup:                inactive  (disabled on boot)

== Ceilometer services ==

openstack-ceilometer-api:               active
openstack-ceilometer-central:           active
openstack-ceilometer-compute:           inactive  (disabled on boot)
openstack-ceilometer-collector:         active
openstack-ceilometer-notification:      active

== Heat services ==
openstack-heat-api:                     active
openstack-heat-api-cfn:                 active
openstack-heat-api-cloudwatch:          inactive  (disabled on boot)
openstack-heat-engine:                  active

== Sahara services ==

openstack-sahara-all:                   inactive  (disabled on boot)

== Ironic services ==

openstack-ironic-api:                   active
openstack-ironic-conductor:             active

== Support services ==

mysqld:                                 inactive  (disabled on boot)
libvirtd:                               active
openvswitch:                            active
dbus:                                   active
rabbitmq-server:                        active
memcached:                              active

== Keystone users ==

+———————————-+——————+———+———————————–+
|                id                |       name       | enabled |               email               |
+———————————-+——————+———+———————————–+
| c1668084d057422ab21c9180424b3e4a |      admin       |   True  |           root@localhost          |
| db938fe459c94cd09fe227a118f8be0f |       aodh       |   True  |           aodh@localhost          |
| 001a56a0872048a592db95dc9885292d |    ceilometer    |   True  |        ceilometer@localhost       |
| e038f5b685b84e6aa601b37312d84a56 |      glance      |   True  |          glance@localhost         |
| d7ddbfd73b814c13926c1ecd5ebe1bb2 |       heat       |   True  |           heat@localhost          |
| dc784308498d40568b649fbf12eaeb51 |      ironic      |   True  |          ironic@localhost         |
| 0c1f829c533240cdbec944236048ee1a | ironic-inspector |   True  | baremetal-introspection@localhost |
| ddbcb1dd885845c698f8d65f6f9ff44f |     neutron      |   True  |         neutron@localhost         |
| 987bd356963e4a5cbf2bd50c50919f9b |       nova       |   True  |           nova@localhost          |
| a5c862796ef24615afc2881e1a59f9d5 |      swift       |   True  |          swift@localhost          |
+———————————-+——————+———+———————————–+

== Glance images ==

+————————————–+————————+————-+——————+————+——–+
| ID                                   | Name                   | Disk Format | Container Format | Size       | Status |
+————————————–+————————+————-+——————+————+——–+
| c734ff64-7723-43ee-a5d2-d662e1e206eb | bm-deploy-kernel       | aki         | aki              | 5157360    | active |
| f80e32c4-cfce-4dcc-993a-939800440fbf | bm-deploy-ramdisk      | ari         | ari              | 380554146  | active |
| 8616adc8-7136-4536-8562-5ed9cf129ed2 | overcloud-full         | qcow2       | bare             | 1175351296 | active |
| 73f5bfc7-99c2-46dc-8507-e5978ec61b84 | overcloud-full-initrd  | ari         | ari              | 36444678   | active |
| 0d30aa5d-869c-4716-bdd4-87685e4790ca | overcloud-full-vmlinuz | aki         | aki              | 5157360    | active |
+————————————–+————————+————-+——————+————+——–+

== Nova managed services ==

+—-+—————-+————+———-+———+——-+—————————-+—————–+
| Id | Binary         | Host       | Zone     | Status  | State | Updated_at                 | Disabled Reason |
+—-+—————-+————+———-+———+——-+—————————-+—————–+
| 1  | nova-cert      | undercloud | internal | enabled | up    | 2016-05-26T18:41:57.000000 | –               |
| 7  | nova-scheduler | undercloud | internal | enabled | up    | 2016-05-26T18:41:55.000000 | –               |
| 8  | nova-conductor | undercloud | internal | enabled | up    | 2016-05-26T18:41:56.000000 | –               |
| 10 | nova-compute   | undercloud | nova     | enabled | up    | 2016-05-26T18:41:54.000000 | –               |
+—-+—————-+————+———-+———+——-+—————————-+—————–+

== Nova networks ==
+————————————–+———-+——+
| ID                                   | Label    | Cidr |
+————————————–+———-+——+
| c27b8d62-f838-4c7e-8828-64ae1503f4c4 | ctlplane | –    |
+————————————–+———-+——+

== Nova instance flavors ==

+————————————–+—————+———–+——+———–+——+——-+————-+———–+
| ID                                   | Name          | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+————————————–+—————+———–+——+———–+——+——-+————-+———–+
| 1320d766-7051-4639-9554-a42e7c7fd958 | control       | 4096      | 40   | 0         |      | 1     | 1.0         | True      |
| 1b0ad845-6273-437f-8573-e4922a256ec7 | block-storage | 4096      | 40   | 0         |      | 1     | 1.0         | True      |
| 27a0e9ee-c909-4d7d-8e86-1eb2e61fb1cb | oooq_control  | 8192      | 49   | 0         |      | 1     | 1.0         | True      |
| 40057aa6-5e8b-4d4b-85d4-f21418d01b5d | baremetal     | 4096      | 40   | 0         |      | 1     | 1.0         | True      |
| 5750def3-dc08-43dd-b194-02d4ea73b8d7 | compute       | 4096      | 40   | 0         |      | 1     | 1.0         | True      |
| 769969da-f429-4f5f-84c9-6456f39539f8 | ceph-storage  | 4096      | 40   | 0         |      | 1     | 1.0         | True      |
| 9c1622bc-ee0f-4dfa-a988-1e89cad47015 | oooq_compute  | 8192      | 49   | 0         |      | 1     | 1.0         | True      |
| a2e5a055-3334-4080-86f9-4887931aee22 | swift-storage | 4096      | 40   | 0         |      | 1     | 1.0         | True      |
| b05b3c15-7928-4f59-9f8d-7d3947e19bee | oooq_ceph     | 8192      | 49   | 0         |      | 1     | 1.0         | True      |
+————————————–+—————+———–+——+———–+——+——-+————-+———–+

== Nova instances ==

+————————————–+————————-+———————————-+——–+————+————-+———————+
| ID                                   | Name                    | Tenant ID                        | Status | Task State | Power State | Networks            |
+————————————–+————————-+———————————-+——–+————+————-+———————+
| 88f841ac-1ca0-4339-ba8a-c2895c0dc57c | overcloud-controller-0  | ccf0e5fdbebb4335ad7875ec821af91d | ACTIVE | –          | Running     | ctlplane=192.0.2.13 |
| f12a1086-7e23-4acb-80a7-8b2efe1e4ef2 | overcloud-novacompute-0 | ccf0e5fdbebb4335ad7875ec821af91d | ACTIVE | –          | Running     | ctlplane=192.0.2.12 |
+————————————–+————————-+———————————-+——–+————+————-+———————+

******************************************************
Neutron reports on undercloud VM
******************************************************

[root@undercloud ~]# neutron net-list

+————————————–+———-+——————————————+
| id                                   | name     | subnets                                  |
+————————————–+———-+——————————————+
| c27b8d62-f838-4c7e-8828-64ae1503f4c4 | ctlplane | 631022c3-cfc5-4353-b038-1592cceea57e     |
|                                      |          | 192.0.2.0/24                             |
+————————————–+———-+——————————————+

[root@undercloud ~]# neutron net-show ctlplane

+—————————+————————————–+
| Field                     | Value                                |
+—————————+————————————–+
| admin_state_up            | True                                 |
| availability_zone_hints   |                                      |
| availability_zones        | nova                                 |
| created_at                | 2016-05-26T11:32:18                  |
| description               |                                      |
| id                        | c27b8d62-f838-4c7e-8828-64ae1503f4c4 |
| ipv4_address_scope        |                                      |
| ipv6_address_scope        |                                      |
| mtu                       | 1500                                 |
| name                      | ctlplane                             |
| provider:network_type     | flat                                 |
| provider:physical_network | ctlplane                             |
| provider:segmentation_id  |                                      |
| router:external           | False                                |
| shared                    | False                                |
| status                    | ACTIVE                               |
| subnets                   | 631022c3-cfc5-4353-b038-1592cceea57e |
| tags                      |                                      |
| tenant_id                 | ccf0e5fdbebb4335ad7875ec821af91d     |
| updated_at                | 2016-05-26T11:32:18                  |
+—————————+————————————–+

[root@undercloud ~]# neutron subnet-list

+————————————+——+————–+————————————+
| id                                 | name | cidr         | allocation_pools                   |
+————————————+——+————–+————————————+
| 631022c3-cfc5-4353-b038-1592cceea5 |      | 192.0.2.0/24 | {"start": "192.0.2.5", "end":      |
| 7e                                 |      |              | "192.0.2.30"}                      |
+————————————+——+————–+————————————+

[root@undercloud ~]# neutron subnet-show 631022c3-cfc5-4353-b038-1592cceea57e

+——————-+—————————————————————+
| Field             | Value                                                         |
+——————-+—————————————————————+
| allocation_pools  | {"start": "192.0.2.5", "end": "192.0.2.30"}                   |
| cidr              | 192.0.2.0/24                                                  |
| created_at        | 2016-05-26T11:32:18                                           |
| description       |                                                               |
| dns_nameservers   |                                                               |
| enable_dhcp       | True                                                          |
| gateway_ip        | 192.0.2.1                                                     |
| host_routes       | {"destination": "169.254.169.254/32", "nexthop": "192.0.2.1"} |
| id                | 631022c3-cfc5-4353-b038-1592cceea57e                          |
| ip_version        | 4                                                             |
| ipv6_address_mode |                                                               |
| ipv6_ra_mode      |                                                               |
| name              |                                                               |
| network_id        | c27b8d62-f838-4c7e-8828-64ae1503f4c4                          |
| subnetpool_id     |                                                               |
| tenant_id         | ccf0e5fdbebb4335ad7875ec821af91d                              |
| updated_at        | 2016-05-26T11:32:18                                           |
+——————-+—————————————————————+

**********************************************
When overcloud deployment is done
**********************************************

[stack@undercloud ~]$ heat stack-list

+————————————–+————+—————–+———————+————–+
| id                                   | stack_name | stack_status    | creation_time       | updated_time |
+————————————–+————+—————–+———————+————–+
| 7002392b-cd2d-439f-b3cd-024979f153a5 | overcloud  | CREATE_COMPLETE | 2016-05-26T13:35:17 | None         |
+————————————–+————+—————–+———————+————–+

[stack@undercloud ~]$ nova list

+————————————–+————————-+——–+————+————-+———————+
| ID                                   | Name                    | Status | Task State | Power State | Networks            |
+————————————–+————————-+——–+————+————-+———————+
| 88f841ac-1ca0-4339-ba8a-c2895c0dc57c | overcloud-controller-0  | ACTIVE | –          | Running     | ctlplane=192.0.2.13 |
| f12a1086-7e23-4acb-80a7-8b2efe1e4ef2 | overcloud-novacompute-0 | ACTIVE | –          | Running     | ctlplane=192.0.2.12 |
+————————————–+————————-+——–+————+————-+———————+

*******************************************
Log into overcloud controller
*******************************************

[stack@undercloud ~]$ ssh heat-admin@192.0.2.13
Last login: Thu May 26 16:52:28 2016 from gateway
[heat-admin@overcloud-controller-0 ~]$ sudo su –
Last login: Thu May 26 15:42:23 UTC 2016 on pts/0

[root@overcloud-controller-0 ~]# ls
keystonerc_admin  oskey01.pem
[root@overcloud-controller-0 ~]# . keystonerc_admin

[root@overcloud-controller-0 ~]# ifconfig

br-ex: flags=4163  mtu 1500
inet 192.0.2.13  netmask 255.255.255.0  broadcast 192.0.2.255
inet6 fe80::2f7:7fff:fe1a:ca59  prefixlen 64  scopeid 0x20
ether 00:f7:7f:1a:ca:59  txqueuelen 0  (Ethernet)
RX packets 689651  bytes 1362839189 (1.2 GiB)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 2346450  bytes 3243444405 (3.0 GiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth0: flags=4163  mtu 1500
inet6 fe80::2f7:7fff:fe1a:ca59  prefixlen 64  scopeid 0x20
ether 00:f7:7f:1a:ca:59  txqueuelen 1000  (Ethernet)
RX packets 2783352  bytes 4201989574 (3.9 GiB)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 2876264  bytes 3280863833 (3.0 GiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73  mtu 65536
inet 127.0.0.1  netmask 255.0.0.0
inet6 ::1  prefixlen 128  scopeid 0x10
loop  txqueuelen 0  (Local Loopback)
RX packets 2962545  bytes 8418607495 (7.8 GiB)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 2962545  bytes 8418607495 (7.8 GiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@overcloud-controller-0 ~]# ovs-vsctl show
cc8be4fb-f96f-4679-b85d-d0afc7dd7f72
    Bridge br-int
        fail_mode: secure
        Port "tapb86d48f2-45"
            tag: 2
            Interface "tapb86d48f2-45"
                type: internal
        Port "tapa4fa2a9d-a4"
            tag: 3
            Interface "tapa4fa2a9d-a4"
                type: internal
        Port "qr-eb92ffa9-da"
            tag: 2
            Interface "qr-eb92ffa9-da"
                type: internal
        Port "qr-e8146f9f-51"
            tag: 3
            Interface "qr-e8146f9f-51"
                type: internal
        Port br-int
            Interface br-int
                type: internal
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port int-br-ex
            Interface int-br-ex
                type: patch
                options: {peer=phy-br-ex}
    Bridge br-tun
        fail_mode: secure
        Port "vxlan-c000020c"
            Interface "vxlan-c000020c"
                type: vxlan
                options: {df_default="true", in_key=flow, local_ip="192.0.2.13", out_key=flow, remote_ip="192.0.2.12"}
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
        Port br-tun
            Interface br-tun
                type: internal
    Bridge br-ex
        Port phy-br-ex
            Interface phy-br-ex
                type: patch
                options: {peer=int-br-ex}
        Port "qg-df23145d-8f"
            Interface "qg-df23145d-8f"
                type: internal
        Port "qg-53315134-1d"
            Interface "qg-53315134-1d"
                type: internal
        Port br-ex
            Interface br-ex
                type: internal
        Port "eth0"
            Interface "eth0"
    ovs_version: "2.5.0"

***************************************************
Routing table on overcloud controller
***************************************************

[root@overcloud-controller-0 ~]# ip route
default via 192.0.2.1 dev br-ex  proto static
169.254.169.254 via 192.0.2.1 dev br-ex  proto static
192.0.2.0/24 dev br-ex  proto kernel  scope link  src 192.0.2.13

Network topology

[root@overcloud-controller-0 ~]# neutron net-list

+————————————–+————–+—————————————-+
| id                                   | name         | subnets                                |
+————————————–+————–+—————————————-+
| 1dad601c-c865-41d8-94cb-efc634c1fc83 | public       | 12787d8b-1b72-402d-9b93-2821f0a18b7b   |
|                                      |              | 192.0.2.0/24                           |
| 0086836e-2dc3-4d40-a2e2-21f222b159f4 | demo_network | dcc40bfc-9293-47bb-8788-d4b5f090d076   |
|                                      |              | 90.0.0.0/24                            |
| 59168b6e-adca-4ec6-982a-f94a0eb770c8 | private      | ede9bbc2-5099-4d9f-91af-2fd4387d52be   |
|                                      |              | 50.0.0.0/24                            |
+————————————–+————–+—————————————-+

[root@overcloud-controller-0 ~]# nova service-list

+—-+——————+————————————-+———-+———+——-+—————————-+—————–+
| Id | Binary           | Host                                | Zone     | Status  | State | Updated_at                 | Disabled Reason |
+—-+——————+————————————-+———-+———+——-+—————————-+—————–+
| 1  | nova-cert        | overcloud-controller-0              | internal | enabled | up    | 2016-05-26T17:09:20.000000 | –               |
| 2  | nova-consoleauth | overcloud-controller-0              | internal | enabled | up    | 2016-05-26T17:09:20.000000 | –               |
| 5  | nova-scheduler   | overcloud-controller-0              | internal | enabled | up    | 2016-05-26T17:09:22.000000 | –               |
| 6  | nova-conductor   | overcloud-controller-0              | internal | enabled | up    | 2016-05-26T17:09:24.000000 | –               |
| 7  | nova-compute     | overcloud-novacompute-0.localdomain | nova     | enabled | up    | 2016-05-26T17:09:19.000000 | –               |
+—-+——————+————————————-+———-+———+——-+—————————-+—————–+

Running VMs

*************************************************************************
Verification of outbound connectivity. Connecting from the undercloud VM to VMs
running in the overcloud via floating IPs belonging to 192.0.2.0/24
*************************************************************************

********************************************************
`ip netns` on overcloud controller
********************************************************
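
Roughly, the check on the overcloud controller looks like the sketch below (the namespace names and the private IP are hypothetical placeholders; take the real ids from the `ip netns` output):

[root@overcloud-controller-0 ~]# ip netns
qrouter-<router-uuid>
qdhcp-<network-uuid>
[root@overcloud-controller-0 ~]# ip netns exec qrouter-<router-uuid> ip addr show
[root@overcloud-controller-0 ~]# ip netns exec qdhcp-<network-uuid> ping -c 3 <instance-private-ip>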

Even the minimal configuration won't work with 16 GB of RAM.
Server memory allocation for the minimal virtual environment


Creating Servers via REST API on RDO Mitaka via Chrome Advanced REST Client

April 21, 2016

In the post below we are going to demonstrate the Chrome Advanced REST Client successfully issuing REST API POST requests for creating RDO Mitaka servers (VMs), as well as getting information about servers via GET requests. All required HTTP headers are configured in the GUI environment, as is the request body field used for server creation.

The version of the Keystone API installed is v2.0.

Following [1], to authenticate access to OpenStack services you are first of all supposed to issue an authentication request to get an authentication token. If the request succeeds, the server returns an authentication token.

Source keystonerc_demo on the Controller or on the Compute node, it doesn't
matter. Then run this cURL command to request a token:

curl -s -X POST http://192.169.142.54:5000/v2.0/tokens \
-H "Content-Type: application/json" \
-d '{"auth": {"tenantName": "'"$OS_TENANT_NAME"'", "passwordCredentials": {"username": "'"$OS_USERNAME"'", "password": "'"$OS_PASSWORD"'"}}}' \
| python -m json.tool

to get the authentication token, and scroll down to the bottom :-

"token": {
    "audit_ids": [
        "ce1JojlRSiO6TmMTDW3QNQ"
    ],
    "expires": "2016-04-21T18:26:28Z",
    "id": "0cfb3ec7a10c4f549a3dc138cf8a270a",        <== X-Auth-Token
    "issued_at": "2016-04-21T17:26:28.246724Z",
    "tenant": {
        "description": "default tenant",
        "enabled": true,
        "id": "1578b57cfd8d43278098c5266f64e49f",    <=== Demo tenant's id
        "name": "demo"
    }
},
"user": {
    "id": "8e1e992eee474c3ab7a08ffde678e35b",
    "name": "demo",
    "roles": [
        {
            "name": "heat_stack_owner"
        },
        {
            "name": "_member_"
        }
    ],
    "roles_links": [],
    "username": "demo"
}
}
}
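
If you prefer to capture the token id straight into a shell variable instead of reading it off the screen, something along these lines works (just a sketch, assuming the same endpoint and the sourced keystonerc_demo credentials as above):

TOKEN=$(curl -s -X POST http://192.169.142.54:5000/v2.0/tokens \
  -H "Content-Type: application/json" \
  -d '{"auth": {"tenantName": "'"$OS_TENANT_NAME"'", "passwordCredentials": {"username": "'"$OS_USERNAME"'", "password": "'"$OS_PASSWORD"'"}}}' \
  | python -c 'import sys, json; print(json.load(sys.stdin)["access"]["token"]["id"])')
echo $TOKEN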

********************************************************************************************
The original request to obtain a token might be issued via the Chrome Advanced REST Client as well
********************************************************************************************

Scrolling down shows the returned token and the demo tenant's id

Required output

{
  "access": {
    "token": {
      "issued_at": "2016-04-21T21:56:52.668252Z",
      "expires": "2016-04-21T22:56:52Z",
      "id": "dd119ea14e97416b834ca72aab7f8b5a",
      "tenant": {
        "description": "default tenant",
        "enabled": true,
        "id": "1578b57cfd8d43278098c5266f64e49f",
        "name": "demo"
      }

*****************************************************************************
Next, create an ssh keypair via the CLI or the dashboard for the particular tenant :-
*****************************************************************************
nova keypair-add oskeymitaka0417 > oskeymitaka0417.pem
chmod 600 *.pem

******************************************************************************************
Below are a couple of sample REST API POST requests that start servers, as they are usually issued and described.
******************************************************************************************

curl -g -i -X POST http://192.169.142.54:8774/v2/1578b57cfd8d43278098c5266f64e49f/servers -H "User-Agent: python-novaclient" -H "Content-Type: application/json" -H "Accept: application/json" -H "X-Auth-Token: 0cfb3ec7a10c4f549a3dc138cf8a270a" -d '{"server": {"name": "CirrOSDevs03", "key_name" : "oskeymitaka0417", "imageRef": "2e148cd0-7dac-49a7-8a79-2efddbd83852", "flavorRef": "1", "max_count": 1, "min_count": 1, "networks": [{"uuid": "e7c90970-c304-4f51-9d65-4be42318487c"}], "security_groups": [{"name": "default"}]}}'

curl -g -i -X POST http://192.169.142.54:8774/v2/1578b57cfd8d43278098c5266f64e49f/servers -H "User-Agent: python-novaclient" -H "Content-Type: application/json" -H "Accept: application/json" -H "X-Auth-Token: 0cfb3ec7a10c4f549a3dc138cf8a270a" -d '{"server": {"name": "VF23Devs03", "key_name" : "oskeymitaka0417", "imageRef": "5b00b1a8-30d1-4e9d-bf7d-5f1abed5173b", "flavorRef": "2", "max_count": 1, "min_count": 1, "networks": [{"uuid": "e7c90970-c304-4f51-9d65-4be42318487c"}], "security_groups": [{"name": "default"}]}}'
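
For completeness, server details can be fetched the same way with a GET request against the servers resource (same tenant id and X-Auth-Token as above; this only illustrates the shape of the call):

curl -g -X GET http://192.169.142.54:8774/v2/1578b57cfd8d43278098c5266f64e49f/servers/detail -H "User-Agent: python-novaclient" -H "Accept: application/json" -H "X-Auth-Token: 0cfb3ec7a10c4f549a3dc138cf8a270a" | python -m json.tool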

**********************************************************************************
We are going to issue the REST API POST requests that create servers
via the Chrome Advanced REST Client
**********************************************************************************

[root@ip-192-169-142-54 ~(keystone_demo)]# glance image-list

+————————————–+———————–+
| ID                                   | Name                  |
+————————————–+———————–+
| 28b590fa-05c8-4706-893a-54efc4ca8cd6 | cirros                |
| 9c78c3da-b25b-4b26-9d24-514185e99c00 | Ubuntu1510Cloud-image |
| a050a122-a1dc-40d0-883f-25617e452d90 | VF23Cloud-image       |
+————————————–+———————–+

[root@ip-192-169-142-54 ~(keystone_demo)]# neutron net-list
+————————————–+————–+—————————————-+
| id                                   | name         | subnets                                |
+————————————–+————–+—————————————-+
| 43daa7c3-4e04-4661-8e78-6634b06d63f3 | public       | 71e0197b-fe9a-4643-b25f-65424d169492   |
|                                      |              | 192.169.142.0/24                       |
| 292a2f21-70af-48ef-b100-c0639a8ffb22 | demo_network | d7aa6f0f-33ba-430d-a409-bd673bed7060   |
|                                      |              | 50.0.0.0/24                            |
+————————————–+————–+—————————————-+

First, the required headers were entered in the corresponding fields, and the
following fragment was placed in the Raw Payload area of the Chrome client

{"server":
 {"name": "VF23Devs03",
  "key_name" : "oskeymitaka0420",
  "imageRef" : "a050a122-a1dc-40d0-883f-25617e452d90",
  "flavorRef": "2",
  "max_count": 1,
  "min_count": 1,
  "networks": [{"uuid": "292a2f21-70af-48ef-b100-c0639a8ffb22"}],
  "security_groups": [{"name": "default"}]
 }
}

Launching Fedora 23 Server :-

Next, an Ubuntu 15.10 server (VM) will be created by changing the image id in the Advanced REST Client GUI environment, for example with the payload sketched below
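
A payload along these lines would do it; the imageRef is the Ubuntu1510Cloud-image id from the glance image-list above, and the server name is arbitrary:

{"server":
 {"name": "Ubuntu1510Devs03",
  "key_name" : "oskeymitaka0420",
  "imageRef" : "9c78c3da-b25b-4b26-9d24-514185e99c00",
  "flavorRef": "2",
  "max_count": 1,
  "min_count": 1,
  "networks": [{"uuid": "292a2f21-70af-48ef-b100-c0639a8ffb22"}],
  "security_groups": [{"name": "default"}]
 }
}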

Make sure that servers have been created and are currently up and running

***************************************************************************************
Now launch Chrome REST Client again for servers verification via GET request
***************************************************************************************


Neutron workflow for Docker Hypervisor running on DVR Cluster RDO Mitaka in an appropriate amount of detail && HA support for Glance storage used to load nova-docker instances

April 6, 2016

Why does DVR come into play?

Refreshing in memory: a similar problem with the Nova-Docker driver (Kilo), with which I had the same kind of problems (VXLAN connection Controller <==> Compute) on F22 (OVS 2.4.0), while the same driver worked fine on CentOS 7.1 (OVS 2.3.1). I just guess that the Nova-Docker driver has a problem with OVS 2.4.0, no matter which of the stable/kilo, stable/liberty, or stable/mitaka branches is checked out for the driver build.

I have to note that the issue is related specifically to the ML2&OVS&VXLAN setup; an RDO Mitaka ML2&OVS&VLAN deployment works with Nova-Docker (stable/mitaka) with no problems.

I have not run ovs-ofctl dump-flows at the br-tun bridges and so on, because even having proved the malfunction I cannot file it to BZ. The Nova-Docker driver is not packaged for RDO, so it's upstream stuff, and upstream won't consider an issue which involves building the driver from source on RDO Mitaka (RC1).

Thus, as a quick and efficient workaround, I suggest a DVR deployment setup. It will result in South-North traffic being forwarded right away from the host running the Docker hypervisor to the Internet and vice versa, due to the basic "fg" functionality (the outgoing interface of the fip-namespace, residing on the Compute node that has the L3 agent running in "dvr" agent_mode).

**************************
Procedure in details
**************************

First install repositories for RDO Mitaka (the most recent build passed CI):-
# yum -y install yum-plugin-priorities
# cd /etc/yum.repos.d
# curl -O https://trunk.rdoproject.org/centos7-mitaka/delorean-deps.repo
# curl -O https://trunk.rdoproject.org/centos7-mitaka/current-passed-ci/delorean.repo
# yum -y install openstack-packstack (Controller only)

Now proceed as follows :-

1. Here is   Answer file to deploy pre DVR Cluster
2. See pre-deployment actions to be undertaken on Controller/Storage Node  

Before the DVR setup, switch the Glance back end to Swift (Swift is configured in the answer file as follows)

CONFIG_SWIFT_STORAGES=/dev/vdb1,/dev/vdc1,/dev/vdd1
CONFIG_SWIFT_STORAGE_ZONES=3
CONFIG_SWIFT_STORAGE_REPLICAS=3
CONFIG_SWIFT_STORAGE_FSTYPE=xfs
CONFIG_SWIFT_HASH=a55607bff10c4210
CONFIG_SWIFT_STORAGE_SIZE=10G

Upon setup completion, on the storage node :-

[root@ip-192-169-142-127 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/centos-root   45G  5.3G   40G  12% /
devtmpfs                 2.8G     0  2.8G   0% /dev
tmpfs                    2.8G  204K  2.8G   1% /dev/shm
tmpfs                    2.8G   25M  2.8G   1% /run
tmpfs                    2.8G     0  2.8G   0% /sys/fs/cgroup
/dev/vdc1                 10G  2.5G  7.5G  25% /srv/node/vdc1
/dev/vdb1                 10G  2.5G  7.5G  25% /srv/node/vdb1
/dev/vdd1                 10G  2.5G  7.5G  25% /srv/node/vdd1

/dev/vda1                497M  211M  286M  43% /boot
tmpfs                    567M  4.0K  567M   1% /run/user/42
tmpfs                    567M  8.0K  567M   1% /run/user/1000

****************************
Update  glance-api.conf
****************************

[glance_store]
stores = swift
default_store = swift
swift_store_auth_address = http://192.169.142.127:5000/v2.0/
swift_store_user = services:glance
swift_store_key = f6a9398960534797 

swift_store_create_container_on_put = True
os_region_name=RegionOne

# openstack-service restart glance

# keystone user-role-add --tenant_id=$UUID_SERVICES_TENANT \
--user=$UUID_GLANCE_USER --role=$UUID_ResellerAdmin_ROLE

The value f6a9398960534797 corresponds to CONFIG_GLANCE_KS_PW in the answer file, i.e. the Keystone glance password used for authentication
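
The UUIDs used in the user-role-add command above can be looked up with the keystone v2 CLI; a rough sketch (the ResellerAdmin role comes from the packstack Swift setup, verify the exact tenant/user/role names on your deployment):

# UUID_SERVICES_TENANT=$(keystone tenant-list | awk '/ services / {print $2}')
# UUID_GLANCE_USER=$(keystone user-list | awk '/ glance / {print $2}')
# UUID_ResellerAdmin_ROLE=$(keystone role-list | awk '/ ResellerAdmin / {print $2}')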

2. Convert the cluster to DVR as advised in "RDO Liberty DVR Neutron workflow on CentOS 7.2"
http://dbaxps.blogspot.com/2015/10/rdo-liberty-rc-dvr-deployment.html
Just one note for RDO Mitaka: on each compute node run

# ovs-vsctl add-br br-ex
# ovs-vsctl add-port br-ex eth0

Then configure

***********************************************************
On Controller (X=2) and Computes X=(3,4) update :-
***********************************************************

# cat ifcfg-br-ex
DEVICE="br-ex"
BOOTPROTO="static"
IPADDR="192.169.142.1(X)7"
NETMASK="255.255.255.0"
DNS1="83.221.202.254"
BROADCAST="192.169.142.255"
GATEWAY="192.169.142.1"
NM_CONTROLLED="no"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="yes"
IPV6INIT=no
ONBOOT="yes"
TYPE="OVSIntPort"
OVS_BRIDGE=br-ex
DEVICETYPE="ovs"

# cat ifcfg-eth0
DEVICE="eth0"
ONBOOT="yes"
TYPE="OVSPort"
DEVICETYPE="ovs"
OVS_BRIDGE=br-ex
NM_CONTROLLED=no
IPV6INIT=no

***************************
Then run script
***************************

#!/bin/bash -x
chkconfig network on
systemctl stop NetworkManager
systemctl disable NetworkManager
service network restart

Reboot node.

**********************************************
Nova-Docker Setup on each Compute
**********************************************

# curl -sSL https://get.docker.com/ | sh
# usermod -aG docker nova      ( seems not help to set 660 for docker.sock )
# systemctl start docker
# systemctl enable docker
# chmod 666  /var/run/docker.sock (add to /etc/rc.d/rc.local)
# easy_install pip
# git clone -b stable/mitaka   https://github.com/openstack/nova-docker

*******************
Driver build
*******************

# cd nova-docker
# pip install -r requirements.txt
# python setup.py install

********************************************
Switch nova-compute to DockerDriver
********************************************
vi /etc/nova/nova.conf
compute_driver=novadocker.virt.docker.DockerDriver

******************************************************************
Next on Controller/Network Node and each Compute Node
******************************************************************

mkdir /etc/nova/rootwrap.d
vi /etc/nova/rootwrap.d/docker.filters

[Filters]
# nova/virt/docker/driver.py: 'ln', '-sf', '/var/run/netns/.*'
ln: CommandFilter, /bin/ln, root

**********************************************************
Nova Compute Service restart on Compute Nodes
**********************************************************
# systemctl restart openstack-nova-compute
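
After the restart it is worth confirming from the Controller that the compute service re-registered, and on the Compute node that the driver loaded without errors; a quick sanity check could look like this (log path is the RDO default):

# source keystonerc_admin                     ( on the Controller )
# nova service-list | grep nova-compute
# nova hypervisor-list
# grep -i -E 'docker|ERROR' /var/log/nova/nova-compute.log | tail     ( on the Compute node )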

***********************************************
Glance API Service restart on Controller
**********************************************
vi /etc/glance/glance-api.conf

container_formats=ami,ari,aki,bare,ovf,ova,docker
# systemctl restart openstack-glance-api

**************************************************
Network flow on Compute in a bit more details
**************************************************

When a floating IP gets assigned to a VM, what actually happens is ( [1] ) :-

The same explanation may be found in [4], only not in a step-by-step style; in particular, it contains a detailed description of the reverse network flow and of the ARP proxy functionality.

1.The fip- namespace is created on the local compute node
(if it does not already exist)
2.A new port rfp- gets created on the qrouter- namespace
(if it does not already exist)
3.The rfp port on the qrouter namespace is assigned the associated floating IP address
4.The fpr port on the fip namespace gets created and linked via point-to-point  network to the rfp port of the qrouter namespace
5.The fip namespace gateway port fg- is assigned an additional address
from the public network range to set up  ARP proxy point
6.The fg- is configured as a Proxy ARP

*********************
Flow itself  ( [1] ):
*********************

1.The VM, initiating transmission, sends a packet via default gateway
and br-int forwards the traffic to the local DVR gateway port (qr-).
2.DVR routes the packet using the routing table to the rfp- port
3. The NAT rule is applied to the packet, replacing the VM's source IP with
the assigned floating IP; then the packet gets sent through the rfp- port,
which connects to the fip namespace via the point-to-point network
169.254.31.28/31.
4. The packet is received on the fpr- port in the fip namespace
and then routed outside through the fg- port (the namespaces involved can be inspected as sketched below).
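
A rough way to inspect the namespaces on the compute node; the suffixes below are hypothetical placeholders, take the actual ids from the `ip netns` output:

# ip netns | grep -E 'fip-|qrouter-'
# ip netns exec qrouter-<router-id> ip rule list
# ip netns exec qrouter-<router-id> ip addr show | grep rfp-
# ip netns exec fip-<external-net-id> ip addr show | grep -E 'fg-|fpr-'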


[root@ip-192-169-142-137 ~(keystone_demo)]# nova list

+————————————–+—————-+——–+————+————-+—————————————–+
| ID                                   | Name           | Status | Task State | Power State | Networks                                |
+————————————–+—————-+——–+————+————-+—————————————–+
| 957814c1-834e-47e5-9236-ef228455fe36 | UbuntuDevs01   | ACTIVE | –          | Running     | demo_network=50.0.0.12, 192.169.142.151 |
| 65dd55b9-23ea-4e5b-aeed-4db259436df2 | derbyGlassfish | ACTIVE | –          | Running     | demo_network=50.0.0.13, 192.169.142.153 |
| f9311d57-4352-48a6-a042-b36393e0af7a | fedora22docker | ACTIVE | –          | Running     | demo_network=50.0.0.14, 192.169.142.154 |
+————————————–+—————-+——–+————+————-+—————————————–+

[root@ip-192-169-142-137 ~(keystone_demo)]# docker ps

CONTAINER ID        IMAGE                      COMMAND                  CREATED             STATUS              PORTS               NAMES
336679f5bf7a        kumarpraveen/fedora-sshd   "/usr/bin/supervisord"   About an hour ago   Up About an hour                        nova-f9311d57-4352-48a6-a042-b36393e0af7a
8bb2ce01e671        derby/docker-glassfish41   "/sbin/my_init"          2 hours ago         Up 2 hours                              nova-65dd55b9-23ea-4e5b-aeed-4db259436df2
fe5eb55a4c9d        rastasheep/ubuntu-sshd     "/usr/sbin/sshd -D"      3 hours ago         Up 3 hours                              nova-957814c1-834e-47e5-9236-ef228455fe36

[root@ip-192-169-142-137 ~(keystone_demo)]# nova show f9311d57-4352-48a6-a042-b36393e0af7a | grep image
| image                                | kumarpraveen/fedora-sshd (93345f0b-fcbd-41e4-b335-a4ecb8b59e73) |
[root@ip-192-169-142-137 ~(keystone_demo)]# nova show 65dd55b9-23ea-4e5b-aeed-4db259436df2 | grep image
| image                                | derby/docker-glassfish41 (9f2cd9bc-7840-47c1-81e8-3bc0f76426ec) |
[root@ip-192-169-142-137 ~(keystone_demo)]# nova show 957814c1-834e-47e5-9236-ef228455fe36 | grep image
| image                                | rastasheep/ubuntu-sshd (29c057f1-3c7d-43e3-80e6-dc8fef1ea035) |

[root@ip-192-169-142-137 ~(keystone_demo)]# . keystonerc_glance
[root@ip-192-169-142-137 ~(keystone_glance)]# glance image-list

+————————————–+————————–+
| ID                                   | Name                     |
+————————————–+————————–+
| 27551b28-6df7-4b0e-a0c8-322b416092c1 | cirros                   |
| 9f2cd9bc-7840-47c1-81e8-3bc0f76426ec | derby/docker-glassfish41 |
| 93345f0b-fcbd-41e4-b335-a4ecb8b59e73 | kumarpraveen/fedora-sshd |
| 29c057f1-3c7d-43e3-80e6-dc8fef1ea035 | rastasheep/ubuntu-sshd   |
+————————————–+————————–+

[root@ip-192-169-142-137 ~(keystone_glance)]# swift list glance

29c057f1-3c7d-43e3-80e6-dc8fef1ea035
29c057f1-3c7d-43e3-80e6-dc8fef1ea035-00001
29c057f1-3c7d-43e3-80e6-dc8fef1ea035-00002

93345f0b-fcbd-41e4-b335-a4ecb8b59e73
93345f0b-fcbd-41e4-b335-a4ecb8b59e73-00001
93345f0b-fcbd-41e4-b335-a4ecb8b59e73-00002
93345f0b-fcbd-41e4-b335-a4ecb8b59e73-00003
93345f0b-fcbd-41e4-b335-a4ecb8b59e73-00004
93345f0b-fcbd-41e4-b335-a4ecb8b59e73-00005

9f2cd9bc-7840-47c1-81e8-3bc0f76426ec
9f2cd9bc-7840-47c1-81e8-3bc0f76426ec-00001
9f2cd9bc-7840-47c1-81e8-3bc0f76426ec-00002
9f2cd9bc-7840-47c1-81e8-3bc0f76426ec-00003
9f2cd9bc-7840-47c1-81e8-3bc0f76426ec-00004
9f2cd9bc-7840-47c1-81e8-3bc0f76426ec-00005
9f2cd9bc-7840-47c1-81e8-3bc0f76426ec-00006


Setting up Nova-Docker on Multi Node DVR Cluster RDO Mitaka

April 1, 2016

UPDATE 04/03/2016
   In the meantime, it is better to use the repositories for RC1
   rather than the Delorean trunks.
END UPDATE

DVR && the Nova-Docker driver (stable/mitaka) tested fine on RDO Mitaka (build 20160329), with none of the issues described in the previous post for RDO Liberty.
So, create a DVR deployment with a Controller/Network node + N(*) Compute nodes, switch to the Docker hypervisor on each Compute node, and make the required updates to glance and the filters file on the Controller. You are all set. The Nova-Docker instances' FIP(s) are available from outside via the Neutron distributed router (DNAT), using the "fg" interface (fip-namespace) residing on the same host as the Docker hypervisor. South-North traffic is not related to VXLAN tunneling on DVR systems.

Why does DVR come into play?

Refreshing in memory: a similar problem with the Nova-Docker driver (Kilo),
with which I had the same kind of problems (VXLAN connection Controller <==> Compute)
on F22 (OVS 2.4.0), while the same driver worked fine on CentOS 7.1 (OVS 2.3.1).
I just guess that the Nova-Docker driver has a problem with OVS 2.4.0,
no matter which of the stable/kilo, stable/liberty, or stable/mitaka branches
is checked out for the driver build.

I have not run ovs-ofctl dump-flows at the br-tun bridges and so on,
because even having proved the malfunction I cannot file it to BZ.
The Nova-Docker driver is not packaged for RDO, so it's upstream stuff,
and upstream won't consider an issue which involves building the driver
from source on RDO Mitaka (RC1).

Thus, as a quick and efficient workaround, I suggest a DVR deployment setup,
to kill two birds with one stone. It will result in South-North traffic
being forwarded right away from the host running the Docker hypervisor to the Internet
and vice versa, due to the basic "fg" functionality (the outgoing interface of
the fip-namespace, residing on the Compute node that has the L3 agent running in "dvr"
agent_mode).


**************************
Procedure in details
**************************

First install repositories for RDO Mitaka (the most recent build passed CI):-
# yum -y install yum-plugin-priorities
# cd /etc/yum.repos.d
# curl -O https://trunk.rdoproject.org/centos7-mitaka/delorean-deps.repo
# curl -O https://trunk.rdoproject.org/centos7-mitaka/current-passed-ci/delorean.repo
# yum -y install openstack-packstack (Controller only)

Now proceed as follows :-

1. Here is   Answer file to deploy pre DVR Cluster
2. Convert cluster to DVR as advised in  “RDO Liberty DVR Neutron workflow on CentOS 7.2”  :-

http://dbaxps.blogspot.com/2015/10/rdo-liberty-rc-dvr-deployment.html

Just one note for RDO Mitaka: on each compute node, first create br-ex and add port eth0 to it

# ovs-vsctl add-br br-ex
# ovs-vsctl add-port br-ex eth0

Then configure

*********************************
Compute nodes X=(3,4)
*********************************

# cat ifcfg-br-ex
DEVICE="br-ex"
BOOTPROTO="static"
IPADDR="192.169.142.1(X)7"
NETMASK="255.255.255.0"
DNS1="83.221.202.254"
BROADCAST="192.169.142.255"
GATEWAY="192.169.142.1"
NM_CONTROLLED="no"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="yes"
IPV6INIT=no
ONBOOT="yes"
TYPE="OVSIntPort"
OVS_BRIDGE=br-ex
DEVICETYPE="ovs"

# cat ifcfg-eth0
DEVICE="eth0"
ONBOOT="yes"
TYPE="OVSPort"
DEVICETYPE="ovs"
OVS_BRIDGE=br-ex
NM_CONTROLLED=no
IPV6INIT=no

***************************
Then run script
***************************

#!/bin/bash -x
chkconfig network on
systemctl stop NetworkManager
systemctl disable NetworkManager
service network restart

Reboot node.

**********************************************
Nova-Docker Setup on each Compute
**********************************************

# curl -sSL https://get.docker.com/ | sh
# usermod -aG docker nova      ( seems not help to set 660 for docker.sock )
# systemctl start docker
# systemctl enable docker
# chmod 666  /var/run/docker.sock (add to /etc/rc.d/rc.local)
# easy_install pip
# git clone -b stable/mitaka   https://github.com/openstack/nova-docker

*******************
Driver build
*******************

# cd nova-docker
# pip install -r requirements.txt
# python setup.py install

********************************************
Switch nova-compute to DockerDriver
********************************************

vi /etc/nova/nova.conf
compute_driver=novadocker.virt.docker.DockerDriver

******************************************************************
Next on Controller/Network Node and each Compute Node
******************************************************************

mkdir /etc/nova/rootwrap.d
vi /etc/nova/rootwrap.d/docker.filters

[Filters]
# nova/virt/docker/driver.py: 'ln', '-sf', '/var/run/netns/.*'
ln: CommandFilter, /bin/ln, root

**********************************************************
Nova Compute Service restart on Compute Nodes
**********************************************************
# systemctl restart openstack-nova-compute
***********************************************
Glance API Service restart on Controller
**********************************************
vi /etc/glance/glance-api.conf
container_formats=ami,ari,aki,bare,ovf,ova,docker

# systemctl restart openstack-glance-api


**************************************************************************************
Build the GlassFish 4.1 docker image on the Compute node per
http://bderzhavets.blogspot.com/2015/01/hacking-dockers-phusionbaseimage-to.html  and upload it to glance :-
**************************************************************************************

[root@ip-192-169-142-137 ~(keystone_admin)]# docker images

REPOSITORY                 TAG                 IMAGE ID            CREATED              SIZE
derby/docker-glassfish41   latest              3a6b84ec9206        About a minute ago   1.155 GB
rastasheep/ubuntu-sshd     latest              70e0ac74c691        2 days ago           251.6 MB
phusion/baseimage          latest              772dd063a060        3 months ago         305.1 MB
tutum/tomcat               latest              2edd730bbedd        7 months ago         539.9 MB
larsks/thttpd              latest              a31ab5050b67        15 months ago        1.058 MB

[root@ip-192-169-142-137 ~(keystone_admin)]# docker save derby/docker-glassfish41 |  openstack image create  derby/docker-glassfish41  --public --container-format docker --disk-format raw

+——————+——————————————————+
| Field            | Value                                                |
+——————+——————————————————+
| checksum         | 9bea6dd0bcd8d0d7da2d82579c0e658a                     |
| container_format | docker                                               |
| created_at       | 2016-04-01T14:29:20Z                                 |
| disk_format      | raw                                                  |
| file             | /v2/images/acf03d15-b7c5-4364-b00f-603b6a5d9af2/file |
| id               | acf03d15-b7c5-4364-b00f-603b6a5d9af2                 |
| min_disk         | 0                                                    |
| min_ram          | 0                                                    |
| name             | derby/docker-glassfish41                             |
| owner            | 31b24d4b1574424abe53b9a5affc70c8                     |
| protected        | False                                                |
| schema           | /v2/schemas/image                                    |
| size             | 1175020032                                           |
| status           | active                                               |
| tags             |                                                      |
| updated_at       | 2016-04-01T14:30:13Z                                 |
| virtual_size     | None                                                 |
| visibility       | public                                               |
+——————+——————————————————+

[root@ip-192-169-142-137 ~(keystone_admin)]# docker ps

CONTAINER ID        IMAGE                      COMMAND               CREATED             STATUS              PORTS               NAMES
8f551d35f2d7        derby/docker-glassfish41   "/sbin/my_init"       39 seconds ago      Up 31 seconds                           nova-faba725e-e031-4edb-bf2c-41c6dfc188c1
dee4425261e8        tutum/tomcat               "/run.sh"             About an hour ago   Up About an hour                        nova-13450558-12d7-414c-bcd2-d746495d7a57
41d2ebc54d75        rastasheep/ubuntu-sshd     "/usr/sbin/sshd -D"   2 hours ago         Up About an hour                        nova-04ddea42-10a3-4a08-9f00-df60b5890ee9

[root@ip-192-169-142-137 ~(keystone_admin)]# docker logs 8f551d35f2d7

*** Running /etc/my_init.d/00_regen_ssh_host_keys.sh…
No SSH host key available. Generating one…
*** Running /etc/my_init.d/01_sshd_start.sh…
Creating SSH2 RSA key; this may take some time …
Creating SSH2 DSA key; this may take some time …
Creating SSH2 ECDSA key; this may take some time …
Creating SSH2 ED25519 key; this may take some time …
invoke-rc.d: policy-rc.d denied execution of restart.
SSH KEYS regenerated by Boris just in case !
SSHD started !

*** Running /etc/my_init.d/database.sh…
Derby database started !
*** Running /etc/my_init.d/run.sh…

Bad Network Configuration.  DNS can not resolve the hostname:
java.net.UnknownHostException: instance-00000006: instance-00000006: unknown error

Waiting for domain1 to start ……
Successfully started the domain : domain1
domain  Location: /opt/glassfish4/glassfish/domains/domain1
Log File: /opt/glassfish4/glassfish/domains/domain1/logs/server.log
Admin Port: 4848
Command start-domain executed successfully.
=> Modifying password of admin to random in Glassfish
spawn asadmin --user admin change-admin-password
Enter the admin password>
Enter the new admin password>
Enter the new admin password again>
Command change-admin-password executed successfully.

A fairly hard-to-build docker image, built by a "docker expert" such as myself 😉,
gets launched, and the nova-docker instance seems to properly run
several daemons at a time (sshd enabled)
[boris@fedora23wks Downloads]$ ssh root@192.169.142.156

root@192.169.142.156's password:
Last login: Fri Apr  1 15:33:06 2016 from 192.169.142.1
root@instance-00000006:~# ps -ef

UID        PID  PPID  C STIME TTY          TIME CMD
root         1     0  0 14:32 ?        00:00:00 /usr/bin/python3 -u /sbin/my_init
root       100     1  0 14:33 ?        00:00:00 /bin/bash /etc/my_init.d/run.sh
root       103     1  0 14:33 ?        00:00:00 /usr/sbin/sshd
root       170     1  0 14:33 ?        00:00:03 /opt/jdk1.8.0_25/bin/java -Djava.library.path=/op
root       427   100  0 14:33 ?        00:00:02 java -jar /opt/glassfish4/bin/../glassfish/lib/cl
root       444   427  2 14:33 ?        00:01:23 /opt/jdk1.8.0_25/bin/java -cp /opt/glassfish4/gla

root      1078     0  0 15:32 ?        00:00:00 bash
root      1110   103  0 15:33 ?        00:00:00 sshd: root@pts/0
root      1112  1110  0 15:33 pts/0    00:00:00 -bash
root      1123  1112  0 15:33 pts/0    00:00:00 ps -ef

Glassfish is running indeed


Setup Docker Hypervisor on Multi Node DVR Cluster RDO Mitaka

March 31, 2016

UPDATE 04/01/2016

  DVR && the Nova-Docker driver (stable/mitaka) tested fine on RDO Mitaka (build 20160329), with none of the issues described in the link for RDO Liberty. So, create a DVR deployment with a Controller/Network node + N(*) Compute nodes, switch to the Docker hypervisor on each Compute node, and make the required updates to glance and the filters file on the Controller. You are all set. The Nova-Docker instances' FIP(s) are available from outside via the Neutron distributed router (DNAT), using the "fg" interface (fip-namespace) residing on the same host as the Docker hypervisor. South-North traffic is not related to VXLAN tunneling on DVR systems.

END UPDATE

Perform a two node cluster deployment: Controller + Network&Compute (ML2&OVS&VXLAN). Another configuration available via packstack is Controller+Storage+Compute&Network.
The deployment schema below will start all four Neutron agents on the Compute node (which is supposed to run the Nova-Docker instances). Thus routing via a VXLAN tunnel will be excluded. Nova-Docker instances will be routed to the Internet and vice versa via the local neutron router (DNAT/SNAT) residing on the same host where the Docker hypervisor is running.

For a multi-node solution, testing DVR with the Nova-Docker driver is required.

For now this has been tested only on an RDO Liberty DVR system :-
An RDO Liberty DVR cluster was switched to Nova-Docker (stable/liberty) successfully. Containers (instances) may be launched on the Compute nodes and are available via their FIP(s) due to neutron (DNAT) routing via the "fg" interface of the corresponding fip-namespace.  Snapshots here

The question will be closed if I am able to get the same results on RDO Mitaka, which would solve the problem of a multi-node Docker hypervisor deployment across Compute nodes, not using VXLAN tunnels for South-North traffic, supported by the metadata, L3, and openvswitch neutron agents, with a single DHCP agent providing
private IPs and residing on the Controller/Network node.
SELINUX should be set to permissive mode after the RDO deployment.

First install repositories for RDO Mitaka (the most recent build passed CI):-
# yum -y install yum-plugin-priorities
# cd /etc/yum.repos.d
# curl -O https://trunk.rdoproject.org/centos7-mitaka/delorean-deps.repo
# curl -O https://trunk.rdoproject.org/centos7-mitaka/current-passed-ci/delorean.repo
# yum -y install openstack-packstack (Controller only)
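
With the repositories in place and the answer file below prepared, the deployment itself is kicked off from the Controller with a single packstack run (the answer file name here is just a placeholder for wherever you saved it):

# packstack --answer-file=./answer-file-mitaka-dvr.txt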

********************************************

Answer file for RDO Mitaka deployment

********************************************

[general]

CONFIG_SSH_KEY=/root/.ssh/id_rsa.pub

CONFIG_DEFAULT_PASSWORD=

CONFIG_SERVICE_WORKERS=%{::processorcount}

CONFIG_MARIADB_INSTALL=y

CONFIG_GLANCE_INSTALL=y

CONFIG_CINDER_INSTALL=y

CONFIG_MANILA_INSTALL=n

CONFIG_NOVA_INSTALL=y

CONFIG_NEUTRON_INSTALL=y

CONFIG_HORIZON_INSTALL=y

CONFIG_SWIFT_INSTALL=y

CONFIG_CEILOMETER_INSTALL=y

CONFIG_AODH_INSTALL=y

CONFIG_GNOCCHI_INSTALL=y

CONFIG_SAHARA_INSTALL=n

CONFIG_HEAT_INSTALL=n

CONFIG_TROVE_INSTALL=n

CONFIG_IRONIC_INSTALL=n

CONFIG_CLIENT_INSTALL=y

CONFIG_NTP_SERVERS=

CONFIG_NAGIOS_INSTALL=y

EXCLUDE_SERVERS=

CONFIG_DEBUG_MODE=n

CONFIG_CONTROLLER_HOST=192.169.142.127
CONFIG_COMPUTE_HOSTS=192.169.142.137
CONFIG_NETWORK_HOSTS=192.169.142.137

CONFIG_VMWARE_BACKEND=n

CONFIG_UNSUPPORTED=n

CONFIG_USE_SUBNETS=n

CONFIG_VCENTER_HOST=

CONFIG_VCENTER_USER=

CONFIG_VCENTER_PASSWORD=

CONFIG_VCENTER_CLUSTER_NAMES=

CONFIG_STORAGE_HOST=192.169.142.127

CONFIG_SAHARA_HOST=192.169.142.127

CONFIG_USE_EPEL=y

CONFIG_REPO=

CONFIG_ENABLE_RDO_TESTING=n

CONFIG_RH_USER=

CONFIG_SATELLITE_URL=

CONFIG_RH_SAT6_SERVER=

CONFIG_RH_PW=

CONFIG_RH_OPTIONAL=y

CONFIG_RH_PROXY=

CONFIG_RH_SAT6_ORG=

CONFIG_RH_SAT6_KEY=

CONFIG_RH_PROXY_PORT=

CONFIG_RH_PROXY_USER=

CONFIG_RH_PROXY_PW=

CONFIG_SATELLITE_USER=

CONFIG_SATELLITE_PW=

CONFIG_SATELLITE_AKEY=

CONFIG_SATELLITE_CACERT=

CONFIG_SATELLITE_PROFILE=

CONFIG_SATELLITE_FLAGS=

CONFIG_SATELLITE_PROXY=

CONFIG_SATELLITE_PROXY_USER=

CONFIG_SATELLITE_PROXY_PW=

CONFIG_SSL_CACERT_FILE=/etc/pki/tls/certs/selfcert.crt

CONFIG_SSL_CACERT_KEY_FILE=/etc/pki/tls/private/selfkey.key

CONFIG_SSL_CERT_DIR=~/packstackca/

CONFIG_SSL_CACERT_SELFSIGN=y

CONFIG_SELFSIGN_CACERT_SUBJECT_C=--

CONFIG_SELFSIGN_CACERT_SUBJECT_ST=State

CONFIG_SELFSIGN_CACERT_SUBJECT_L=City

CONFIG_SELFSIGN_CACERT_SUBJECT_O=openstack

CONFIG_SELFSIGN_CACERT_SUBJECT_OU=packstack

CONFIG_SELFSIGN_CACERT_SUBJECT_CN=ip-192-169-142-127.ip.secureserver.net

CONFIG_SELFSIGN_CACERT_SUBJECT_MAIL=admin@ip-192-169-142-127.ip.secureserver.net

CONFIG_AMQP_BACKEND=rabbitmq

CONFIG_AMQP_HOST=192.169.142.127

CONFIG_AMQP_ENABLE_SSL=n

CONFIG_AMQP_ENABLE_AUTH=n

CONFIG_AMQP_NSS_CERTDB_PW=PW_PLACEHOLDER

CONFIG_AMQP_AUTH_USER=amqp_user

CONFIG_AMQP_AUTH_PASSWORD=PW_PLACEHOLDER

CONFIG_MARIADB_HOST=192.169.142.127

CONFIG_MARIADB_USER=root

CONFIG_MARIADB_PW=7207ae344ed04957

CONFIG_KEYSTONE_DB_PW=abcae16b785245c3

CONFIG_KEYSTONE_DB_PURGE_ENABLE=True

CONFIG_KEYSTONE_REGION=RegionOne

CONFIG_KEYSTONE_ADMIN_TOKEN=3ad2de159f9649afb0c342ba57e637d9

CONFIG_KEYSTONE_ADMIN_EMAIL=root@localhost

CONFIG_KEYSTONE_ADMIN_USERNAME=admin

CONFIG_KEYSTONE_ADMIN_PW=7049f834927e4468

CONFIG_KEYSTONE_DEMO_PW=bf737b785cfa4398

CONFIG_KEYSTONE_API_VERSION=v2.0

CONFIG_KEYSTONE_TOKEN_FORMAT=UUID

CONFIG_KEYSTONE_SERVICE_NAME=httpd

CONFIG_KEYSTONE_IDENTITY_BACKEND=sql

CONFIG_KEYSTONE_LDAP_URL=ldap://12.0.0.127

CONFIG_KEYSTONE_LDAP_USER_DN=

CONFIG_KEYSTONE_LDAP_USER_PASSWORD=

CONFIG_KEYSTONE_LDAP_SUFFIX=

CONFIG_KEYSTONE_LDAP_QUERY_SCOPE=one

CONFIG_KEYSTONE_LDAP_PAGE_SIZE=-1

CONFIG_KEYSTONE_LDAP_USER_SUBTREE=

CONFIG_KEYSTONE_LDAP_USER_FILTER=

CONFIG_KEYSTONE_LDAP_USER_OBJECTCLASS=

CONFIG_KEYSTONE_LDAP_USER_ID_ATTRIBUTE=

CONFIG_KEYSTONE_LDAP_USER_NAME_ATTRIBUTE=

CONFIG_KEYSTONE_LDAP_USER_MAIL_ATTRIBUTE=

CONFIG_KEYSTONE_LDAP_USER_ENABLED_ATTRIBUTE=

CONFIG_KEYSTONE_LDAP_USER_ENABLED_MASK=-1

CONFIG_KEYSTONE_LDAP_USER_ENABLED_DEFAULT=TRUE

CONFIG_KEYSTONE_LDAP_USER_ENABLED_INVERT=n

CONFIG_KEYSTONE_LDAP_USER_ATTRIBUTE_IGNORE=

CONFIG_KEYSTONE_LDAP_USER_DEFAULT_PROJECT_ID_ATTRIBUTE=

CONFIG_KEYSTONE_LDAP_USER_ALLOW_CREATE=n

CONFIG_KEYSTONE_LDAP_USER_ALLOW_UPDATE=n

CONFIG_KEYSTONE_LDAP_USER_ALLOW_DELETE=n

CONFIG_KEYSTONE_LDAP_USER_PASS_ATTRIBUTE=

CONFIG_KEYSTONE_LDAP_USER_ENABLED_EMULATION_DN=

CONFIG_KEYSTONE_LDAP_USER_ADDITIONAL_ATTRIBUTE_MAPPING=

CONFIG_KEYSTONE_LDAP_GROUP_SUBTREE=

CONFIG_KEYSTONE_LDAP_GROUP_FILTER=

CONFIG_KEYSTONE_LDAP_GROUP_OBJECTCLASS=

CONFIG_KEYSTONE_LDAP_GROUP_ID_ATTRIBUTE=

CONFIG_KEYSTONE_LDAP_GROUP_NAME_ATTRIBUTE=

CONFIG_KEYSTONE_LDAP_GROUP_MEMBER_ATTRIBUTE=

CONFIG_KEYSTONE_LDAP_GROUP_DESC_ATTRIBUTE=

CONFIG_KEYSTONE_LDAP_GROUP_ATTRIBUTE_IGNORE=

CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_CREATE=n

CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_UPDATE=n

CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_DELETE=n

CONFIG_KEYSTONE_LDAP_GROUP_ADDITIONAL_ATTRIBUTE_MAPPING=

CONFIG_KEYSTONE_LDAP_USE_TLS=n

CONFIG_KEYSTONE_LDAP_TLS_CACERTDIR=

CONFIG_KEYSTONE_LDAP_TLS_CACERTFILE=

CONFIG_KEYSTONE_LDAP_TLS_REQ_CERT=demand

CONFIG_GLANCE_DB_PW=41264fc52ffd4fe8

CONFIG_GLANCE_KS_PW=f6a9398960534797

CONFIG_GLANCE_BACKEND=file

CONFIG_CINDER_DB_PW=5ac08c6d09ba4b69

CONFIG_CINDER_DB_PURGE_ENABLE=True

CONFIG_CINDER_KS_PW=c8cb1ecb8c2b4f6f

CONFIG_CINDER_BACKEND=lvm

CONFIG_CINDER_VOLUMES_CREATE=y

CONFIG_CINDER_VOLUMES_SIZE=2G

CONFIG_CINDER_GLUSTER_MOUNTS=

CONFIG_CINDER_NFS_MOUNTS=

CONFIG_CINDER_NETAPP_LOGIN=

CONFIG_CINDER_NETAPP_PASSWORD=

CONFIG_CINDER_NETAPP_HOSTNAME=

CONFIG_CINDER_NETAPP_SERVER_PORT=80

CONFIG_CINDER_NETAPP_STORAGE_FAMILY=ontap_cluster

CONFIG_CINDER_NETAPP_TRANSPORT_TYPE=http

CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL=nfs

CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER=1.0

CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES=720

CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START=20

CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP=60

CONFIG_CINDER_NETAPP_NFS_SHARES=

CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG=/etc/cinder/shares.conf

CONFIG_CINDER_NETAPP_VOLUME_LIST=

CONFIG_CINDER_NETAPP_VFILER=

CONFIG_CINDER_NETAPP_PARTNER_BACKEND_NAME=

CONFIG_CINDER_NETAPP_VSERVER=

CONFIG_CINDER_NETAPP_CONTROLLER_IPS=

CONFIG_CINDER_NETAPP_SA_PASSWORD=

CONFIG_CINDER_NETAPP_ESERIES_HOST_TYPE=linux_dm_mp

CONFIG_CINDER_NETAPP_WEBSERVICE_PATH=/devmgr/v2

CONFIG_CINDER_NETAPP_STORAGE_POOLS=

CONFIG_IRONIC_DB_PW=PW_PLACEHOLDER

CONFIG_IRONIC_KS_PW=PW_PLACEHOLDER

CONFIG_NOVA_DB_PURGE_ENABLE=True

CONFIG_NOVA_DB_PW=1e1b5aeeeaf342a8

CONFIG_NOVA_KS_PW=d9583177a2444f06

CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=16.0

CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5

CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL=tcp

CONFIG_NOVA_COMPUTE_MANAGER=nova.compute.manager.ComputeManager

CONFIG_VNC_SSL_CERT=

CONFIG_VNC_SSL_KEY=

CONFIG_NOVA_PCI_ALIAS=

CONFIG_NOVA_PCI_PASSTHROUGH_WHITELIST=

CONFIG_NOVA_COMPUTE_PRIVIF=

CONFIG_NOVA_NETWORK_MANAGER=nova.network.manager.FlatDHCPManager

CONFIG_NOVA_NETWORK_PUBIF=eth0

CONFIG_NOVA_NETWORK_PRIVIF=

CONFIG_NOVA_NETWORK_FIXEDRANGE=192.168.32.0/22

CONFIG_NOVA_NETWORK_FLOATRANGE=10.3.4.0/22

CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP=n

CONFIG_NOVA_NETWORK_VLAN_START=100

CONFIG_NOVA_NETWORK_NUMBER=1

CONFIG_NOVA_NETWORK_SIZE=255

CONFIG_NEUTRON_KS_PW=808e36e154bd4cee

CONFIG_NEUTRON_DB_PW=0e2b927a21b44737

CONFIG_NEUTRON_L3_EXT_BRIDGE=br-ex

CONFIG_NEUTRON_METADATA_PW=a965cd23ed2f4502

CONFIG_LBAAS_INSTALL=n

CONFIG_NEUTRON_METERING_AGENT_INSTALL=n

CONFIG_NEUTRON_FWAAS=n

CONFIG_NEUTRON_VPNAAS=n

CONFIG_NEUTRON_ML2_TYPE_DRIVERS=vxlan
CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES=vxlan
CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS=openvswitch
CONFIG_NEUTRON_ML2_FLAT_NETWORKS=*
CONFIG_NEUTRON_ML2_VLAN_RANGES=
CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES=1001:2000
CONFIG_NEUTRON_ML2_VXLAN_GROUP=239.1.1.2
CONFIG_NEUTRON_ML2_VNI_RANGES=1001:2000
CONFIG_NEUTRON_L2_AGENT=openvswitch

CONFIG_NEUTRON_ML2_SUPPORTED_PCI_VENDOR_DEVS=['15b3:1004', '8086:10ca']

CONFIG_NEUTRON_ML2_SRIOV_AGENT_REQUIRED=n

CONFIG_NEUTRON_ML2_SRIOV_INTERFACE_MAPPINGS=

CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS=

CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=physnet1:br-ex

CONFIG_NEUTRON_OVS_BRIDGE_IFACES=

CONFIG_NEUTRON_OVS_TUNNEL_IF=eth1

CONFIG_NEUTRON_OVS_TUNNEL_SUBNETS=

CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT=4789

CONFIG_MANILA_DB_PW=PW_PLACEHOLDER

CONFIG_MANILA_KS_PW=PW_PLACEHOLDER

CONFIG_MANILA_BACKEND=generic

CONFIG_MANILA_NETAPP_DRV_HANDLES_SHARE_SERVERS=false

CONFIG_MANILA_NETAPP_TRANSPORT_TYPE=https

CONFIG_MANILA_NETAPP_LOGIN=admin

CONFIG_MANILA_NETAPP_PASSWORD=

CONFIG_MANILA_NETAPP_SERVER_HOSTNAME=

CONFIG_MANILA_NETAPP_STORAGE_FAMILY=ontap_cluster

CONFIG_MANILA_NETAPP_SERVER_PORT=443

CONFIG_MANILA_NETAPP_AGGREGATE_NAME_SEARCH_PATTERN=(.*)

CONFIG_MANILA_NETAPP_ROOT_VOLUME_AGGREGATE=

CONFIG_MANILA_NETAPP_ROOT_VOLUME_NAME=root

CONFIG_MANILA_NETAPP_VSERVER=

CONFIG_MANILA_GENERIC_DRV_HANDLES_SHARE_SERVERS=true

CONFIG_MANILA_GENERIC_VOLUME_NAME_TEMPLATE=manila-share-%s

CONFIG_MANILA_GENERIC_SHARE_MOUNT_PATH=/shares

CONFIG_MANILA_SERVICE_IMAGE_LOCATION=https://www.dropbox.com/s/vi5oeh10q1qkckh/ubuntu_1204_nfs_cifs.qcow2

CONFIG_MANILA_SERVICE_INSTANCE_USER=ubuntu

CONFIG_MANILA_SERVICE_INSTANCE_PASSWORD=ubuntu

CONFIG_MANILA_NETWORK_TYPE=neutron

CONFIG_MANILA_NETWORK_STANDALONE_GATEWAY=

CONFIG_MANILA_NETWORK_STANDALONE_NETMASK=

CONFIG_MANILA_NETWORK_STANDALONE_SEG_ID=

CONFIG_MANILA_NETWORK_STANDALONE_IP_RANGE=

CONFIG_MANILA_NETWORK_STANDALONE_IP_VERSION=4

CONFIG_MANILA_GLUSTERFS_SERVERS=

CONFIG_MANILA_GLUSTERFS_NATIVE_PATH_TO_PRIVATE_KEY=

CONFIG_MANILA_GLUSTERFS_VOLUME_PATTERN=

CONFIG_MANILA_GLUSTERFS_TARGET=

CONFIG_MANILA_GLUSTERFS_MOUNT_POINT_BASE=

CONFIG_MANILA_GLUSTERFS_NFS_SERVER_TYPE=gluster

CONFIG_MANILA_GLUSTERFS_PATH_TO_PRIVATE_KEY=

CONFIG_MANILA_GLUSTERFS_GANESHA_SERVER_IP=

CONFIG_HORIZON_SSL=n

CONFIG_HORIZON_SECRET_KEY=33cade531a764c858e4e6c22488f379f

CONFIG_HORIZON_SSL_CERT=

CONFIG_HORIZON_SSL_KEY=

CONFIG_HORIZON_SSL_CACERT=

CONFIG_SWIFT_KS_PW=30911de72a15427e

CONFIG_SWIFT_STORAGES=

CONFIG_SWIFT_STORAGE_ZONES=1

CONFIG_SWIFT_STORAGE_REPLICAS=1

CONFIG_SWIFT_STORAGE_FSTYPE=ext4

CONFIG_SWIFT_HASH=a55607bff10c4210

CONFIG_SWIFT_STORAGE_SIZE=2G

CONFIG_HEAT_DB_PW=PW_PLACEHOLDER

CONFIG_HEAT_AUTH_ENC_KEY=0ef4161f3bb24230

CONFIG_HEAT_KS_PW=PW_PLACEHOLDER

CONFIG_HEAT_CLOUDWATCH_INSTALL=n

CONFIG_HEAT_CFN_INSTALL=n

CONFIG_HEAT_DOMAIN=heat

CONFIG_HEAT_DOMAIN_ADMIN=heat_admin

CONFIG_HEAT_DOMAIN_PASSWORD=PW_PLACEHOLDER

CONFIG_PROVISION_DEMO=n

CONFIG_PROVISION_TEMPEST=n

CONFIG_PROVISION_DEMO_FLOATRANGE=172.24.4.224/28

CONFIG_PROVISION_IMAGE_NAME=cirros

CONFIG_PROVISION_IMAGE_URL=http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img

CONFIG_PROVISION_IMAGE_FORMAT=qcow2

CONFIG_PROVISION_IMAGE_SSH_USER=cirros

CONFIG_TEMPEST_HOST=

CONFIG_PROVISION_TEMPEST_USER=

CONFIG_PROVISION_TEMPEST_USER_PW=PW_PLACEHOLDER

CONFIG_PROVISION_TEMPEST_FLOATRANGE=172.24.4.224/28

CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/openstack/tempest.git

CONFIG_PROVISION_TEMPEST_REPO_REVISION=master

CONFIG_RUN_TEMPEST=n

CONFIG_RUN_TEMPEST_TESTS=smoke

CONFIG_PROVISION_OVS_BRIDGE=n

CONFIG_CEILOMETER_SECRET=19ae0e7430174349

CONFIG_CEILOMETER_KS_PW=337b08d4b3a44753

CONFIG_CEILOMETER_SERVICE_NAME=httpd

CONFIG_CEILOMETER_COORDINATION_BACKEND=redis

CONFIG_MONGODB_HOST=192.169.142.127

CONFIG_REDIS_MASTER_HOST=192.169.142.127

CONFIG_REDIS_PORT=6379

CONFIG_REDIS_HA=n

CONFIG_REDIS_SLAVE_HOSTS=

CONFIG_REDIS_SENTINEL_HOSTS=

CONFIG_REDIS_SENTINEL_CONTACT_HOST=

CONFIG_REDIS_SENTINEL_PORT=26379

CONFIG_REDIS_SENTINEL_QUORUM=2

CONFIG_REDIS_MASTER_NAME=mymaster

CONFIG_AODH_KS_PW=acdd500a5fed4700

CONFIG_GNOCCHI_DB_PW=cf11b5d6205f40e7

CONFIG_GNOCCHI_KS_PW=36eba4690b224044

CONFIG_TROVE_DB_PW=PW_PLACEHOLDER

CONFIG_TROVE_KS_PW=PW_PLACEHOLDER

CONFIG_TROVE_NOVA_USER=trove

CONFIG_TROVE_NOVA_TENANT=services

CONFIG_TROVE_NOVA_PW=PW_PLACEHOLDER

CONFIG_SAHARA_DB_PW=PW_PLACEHOLDER

CONFIG_SAHARA_KS_PW=PW_PLACEHOLDER

CONFIG_NAGIOS_PW=02f168ee8edd44e4

**********************************************************************

Upon completion, connect to the external network on the Compute Node :-

**********************************************************************

[root@ip-192-169-142-137 network-scripts(keystone_admin)]# cat ifcfg-br-ex

DEVICE="br-ex"
BOOTPROTO="static"
IPADDR="172.124.4.137"
NETMASK="255.255.255.0"
DNS1="83.221.202.254"
BROADCAST="172.124.4.255"
GATEWAY="172.124.4.1"
NM_CONTROLLED="no"
TYPE="OVSIntPort"
OVS_BRIDGE=br-ex
DEVICETYPE="ovs"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="yes"
IPV6INIT=no

[root@ip-192-169-142-137 network-scripts(keystone_admin)]# cat ifcfg-eth2

DEVICE="eth2"
# HWADDR=00:22:15:63:E4:E2
ONBOOT="yes"
TYPE="OVSPort"
DEVICETYPE="ovs"
OVS_BRIDGE=br-ex
NM_CONTROLLED=no
IPV6INIT=no

[root@ip-192-169-142-137 network-scripts(keystone_admin)]# cat start.sh

#!/bin/bash -x
chkconfig network on
systemctl stop NetworkManager
systemctl disable NetworkManager
service network restart

**********************************************
Verification Compute node status
**********************************************

[root@ip-192-169-142-137 ~(keystone_admin)]# openstack-status

== Nova services ==
openstack-nova-api:                     inactive  (disabled on boot)
openstack-nova-compute:                 active
openstack-nova-network:                 inactive  (disabled on boot)
openstack-nova-scheduler:               inactive  (disabled on boot)
== neutron services ==
neutron-server:                         inactive  (disabled on boot)
neutron-dhcp-agent:                     active
neutron-l3-agent:                          active
neutron-metadata-agent:               active
neutron-openvswitch-agent:          active

==ceilometer services==
openstack-ceilometer-api:               inactive  (disabled on boot)
openstack-ceilometer-central:         inactive  (disabled on boot)
openstack-ceilometer-compute:       active
openstack-ceilometer-collector:       inactive  (disabled on boot)
== Support services ==
openvswitch:                            active
dbus:                                        active
Warning novarc not sourced

[root@ip-192-169-142-137 ~(keystone_admin)]# nova-manage version
13.0.0-0.20160329105656.7662fb9.el7.centos

Also install python-openstackclient on the Compute node.

******************************************
Verification status on Controller
******************************************

[root@ip-192-169-142-127 ~(keystone_admin)]# openstack-status

== Nova services ==
openstack-nova-api:                     active
openstack-nova-compute:                 inactive  (disabled on boot)
openstack-nova-network:                 inactive  (disabled on boot)
openstack-nova-scheduler:               active
openstack-nova-cert:                    active
openstack-nova-conductor:               active
openstack-nova-console:                 inactive  (disabled on boot)
openstack-nova-consoleauth:             active
openstack-nova-xvpvncproxy:             inactive  (disabled on boot)
== Glance services ==
openstack-glance-api:                   active
openstack-glance-registry:              active
== Keystone service ==
openstack-keystone:                     inactive  (disabled on boot)
== Horizon service ==
openstack-dashboard:                    active
== neutron services ==
neutron-server:                        active
neutron-dhcp-agent:                 inactive  (disabled on boot)
neutron-l3-agent:                      inactive  (disabled on boot)
neutron-metadata-agent:           inactive  (disabled on boot)
== Swift services ==
openstack-swift-proxy:                  active
openstack-swift-account:                active
openstack-swift-container:              active
openstack-swift-object:                 active
== Cinder services ==
openstack-cinder-api:                   active
openstack-cinder-scheduler:             active
openstack-cinder-volume:                active
openstack-cinder-backup:                active
== Ceilometer services ==
openstack-ceilometer-api:               inactive  (disabled on boot)
openstack-ceilometer-central:           active
openstack-ceilometer-compute:           inactive  (disabled on boot)
openstack-ceilometer-collector:         active
openstack-ceilometer-notification:      active
== Support services ==
mysqld:                               inactive  (disabled on boot)
dbus:                                   active
target:                                 active
rabbitmq-server:                  active
memcached:                        active

== Keystone users ==

+———————————-+————+———+———————-+

|                id                |    name    | enabled |        email         |

+———————————-+————+———+———————-+
| f7dbea6e5b704c7d8e77e88c1ce1fce8 |   admin    |   True  |    root@localhost    |
| baf4ee3fe0e749f982747ffe68e0e562 |    aodh    |   True  |    aodh@localhost    |
| 770d5c0974fb49998440b1080e5939a0 |   boris    |   True  |                      |
| f88d8e83df0f43a991cb7ff063a2439f | ceilometer |   True  | ceilometer@localhost |
| e7a92f59f081403abd9c0f92c4f8d8d0 |   cinder   |   True  |   cinder@localhost   |
| 58e531b5eba74db2b4559aaa16561900 |   glance   |   True  |   glance@localhost   |
| d215d99466aa481f847df2a909c139f7 |  gnocchi   |   True  |  gnocchi@localhost   |
| 5d3433f7d54d40d8b9eeb576582cc672 |  neutron   |   True  |  neutron@localhost   |
| 3a50997aa6fc4c129dff624ed9745b94 |    nova    |   True  |    nova@localhost    |
| ef1a323f98cb43c789e4f84860afea35 |   swift    |   True  |   swift@localhost    |
+———————————-+————+———+———————-+

== Glance images ==

+————————————–+————————–+
| ID                                   | Name                     |
+————————————–+————————–+
| cbf88266-0b49-4bc2-9527-cc9c9da0c1eb | derby/docker-glassfish41 |
| 5d0a97c3-c717-46ac-a30f-86208ea0d31d | larsks/thttpd            |
| 80eb0d7d-17ae-49c7-997f-38d8a3aeeabd | rastasheep/ubuntu-sshd   |
+————————————–+————————–+

== Nova managed services ==

+—-+——————+—————————————-+———-+———+——-+—————————-+—————–+

| Id | Binary           | Host                                   | Zone     | Status  | State | Updated_at                 | Disabled Reason |

+—-+——————+—————————————-+———-+———+——-+—————————-+—————–+
| 5  | nova-cert        | ip-192-169-142-127.ip.secureserver.net | internal | enabled | up    | 2016-03-31T09:59:53.000000 |                |
| 6  | nova-consoleauth | ip-192-169-142-127.ip.secureserver.net | internal | enabled | up    | 2016-03-31T09:59:52.000000 | –               |
| 7  | nova-scheduler   | ip-192-169-142-127.ip.secureserver.net | internal | enabled | up    | 2016-03-31T09:59:52.000000 | –               |
| 8  | nova-conductor   | ip-192-169-142-127.ip.secureserver.net | internal | enabled | up    | 2016-03-31T09:59:54.000000 | –               |
| 10 | nova-compute     | ip-192-169-142-137.ip.secureserver.net | nova     | enabled | up    | 2016-03-31T09:59:55.000000 | –               |

+—-+——————+—————————————-+———-+———+——-+—————————-+—————–+

== Nova networks ==

+————————————–+————–+——+
| ID                                   | Label        | Cidr |
+————————————–+————–+——+
| 47798c88-29e5-4dee-8206-d0f9b7e19130 | public       | –    |
| 8f849505-0550-4f6c-8c73-6b8c9ec56789 | private      | –    |
| bcfcf3c3-c651-4ae7-b7ee-fdafae04a2a9 | demo_network | –    |
+————————————–+————–+——+

== Nova instance flavors ==

+—-+———–+———–+——+———–+——+——-+————-+———–+
| ID | Name      | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+—-+———–+———–+——+———–+——+——-+————-+———–+
| 1  | m1.tiny   | 512       | 1    | 0         |      | 1     | 1.0         | True      |
| 2  | m1.small  | 2048      | 20   | 0         |      | 1     | 1.0         | True      |
| 3  | m1.medium | 4096      | 40   | 0         |      | 2     | 1.0         | True      |
| 4  | m1.large  | 8192      | 80   | 0         |      | 4     | 1.0         | True      |
| 5  | m1.xlarge | 16384     | 160  | 0         |      | 8     | 1.0         | True      |
+—-+———–+———–+——+———–+——+——-+————-+———–+

== Nova instances ==

+————————————–+——————+———————————-+——–+————+————-+—————————————+
| ID                                   | Name             | Tenant ID                        | Status | Task State | Power State | Networks                              |
+————————————–+——————+———————————-+——–+————+————-+—————————————+

| c8284258-f9c0-4b81-8cd0-db6e7cbf8d48 | UbuntuRastasheep | 32df2fd0c85745c9901b2247ec4905bc | ACTIVE | –          | Running     | demo_network=90.0.0.15, 172.124.4.154 |
| 50f22f8a-e6ff-4b8b-8c15-f3b9bbd1aad2 | derbyGlassfish   | 32df2fd0c85745c9901b2247ec4905bc | ACTIVE | –          | Running     | demo_network=90.0.0.16, 172.124.4.155 |
| 03664d5e-f3c5-4ebb-9109-e96189150626 | testLars         | 32df2fd0c85745c9901b2247ec4905bc | ACTIVE | –          | Running     | demo_network=90.0.0.14, 172.124.4.153 |
+————————————–+——————+———————————-+——–+————+————-+—————————————+

*********************************
Nova-Docker Setup on Compute
*********************************

# curl -sSL https://get.docker.com/ | sh
# usermod -aG docker nova      ( this alone does not seem to set 660 on docker.sock )
# systemctl start docker
# systemctl enable docker
# chmod 666  /var/run/docker.sock (add to /etc/rc.d/rc.local)
# easy_install pip
# git clone -b stable/mitaka   https://github.com/openstack/nova-docker
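Since the group membership alone did not persist the socket permissions, one way (an assumption on my part, not the only option) is to re-apply the chmod at boot via rc.local, which has to be executable on CentOS 7:

# echo 'chmod 666 /var/run/docker.sock' >> /etc/rc.d/rc.local
# chmod +x /etc/rc.d/rc.local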

*******************
Driver build
*******************

# cd nova-docker
# pip install -r requirements.txt
# python setup.py install

********************************************
Switch nova-compute to DockerDriver
********************************************
vi /etc/nova/nova.conf
compute_driver=novadocker.virt.docker.DockerDriver

***********************************
Next step, on the Controller
***********************************

mkdir /etc/nova/rootwrap.d
vi /etc/nova/rootwrap.d/docker.filters

[Filters]
# nova/virt/docker/driver.py: 'ln', '-sf', '/var/run/netns/.*'
ln: CommandFilter, /bin/ln, root

****************************************************
Nova Compute Service restart on Compute
****************************************************

# systemctl restart openstack-nova-compute

****************************************
Glance API Service restart on Controller
****************************************

vi /etc/glance/glance-api.conf
container_formats=ami,ari,aki,bare,ovf,ova,docker
# systemctl restart openstack-glance-api

Build the GlassFish 4.1 docker image on the Compute node per
http://bderzhavets.blogspot.com/2015/01/hacking-dockers-phusionbaseimage-to.html  and upload it to glance :-

[root@ip-192-169-142-137 ~(keystone_admin)]# docker images

REPOSITORY                 TAG                 IMAGE ID            CREATED             SIZE

derby/docker-glassfish41   latest              615ce2c6a21f        29 minutes ago      1.155 GB
rastasheep/ubuntu-sshd     latest              70e0ac74c691        32 hours ago        251.6 MB
phusion/baseimage          latest              772dd063a060        3 months ago        305.1 MB
larsks/thttpd              latest              a31ab5050b67        15 months ago       1.058 MB

[root@ip-192-169-142-137 ~(keystone_admin)]# docker save derby/docker-glassfish41 | openstack image create derby/docker-glassfish41 --public --container-format docker --disk-format raw

+——————+——————————————————+
| Field            | Value                                                |
+——————+——————————————————+
| checksum         | dca755d516e35d947ae87ff8bef8fa8f                     |
| container_format | docker                                               |
| created_at       | 2016-03-31T09:32:53Z                                 |
| disk_format      | raw                                                  |
| file             | /v2/images/cbf88266-0b49-4bc2-9527-cc9c9da0c1eb/file |
| id               | cbf88266-0b49-4bc2-9527-cc9c9da0c1eb                 |
| min_disk         | 0                                                    |
| min_ram          | 0                                                    |
| name             | derby/docker-glassfish41                             |
| owner            | 677c4fec97d14b8db0639086f5d59f7d                     |
| protected        | False                                                |
| schema           | /v2/schemas/image                                    |
| size             | 1175030784                                           |
| status           | active                                               |
| tags             |                                                      |
| updated_at       | 2016-03-31T09:33:58Z                                 |
| virtual_size     | None                                                 |
| visibility       | public                                               |
+——————+——————————————————+

Now launch the derbyGlassfish instance via the dashboard and assign a floating IP; a rough CLI equivalent is sketched below.
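The flavor, the floating IP pool name "public" and the particular FIP below are simply the values that appear in the listings above; treat them as an illustration and adjust as needed:

# NETID=$(neutron net-list | awk '/demo_network/ {print $2}')
# nova boot --image derby/docker-glassfish41 --flavor m1.small --nic net-id=$NETID derbyGlassfish
# nova floating-ip-create public
# nova floating-ip-associate derbyGlassfish 172.124.4.155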

Access the Glassfish instance via FIP 172.124.4.155

[root@ip-192-169-142-137 ~(keystone_admin)]# docker ps

CONTAINER ID        IMAGE                      COMMAND                  CREATED             STATUS              PORTS               NAMES

70ac259e9176        derby/docker-glassfish41   “/sbin/my_init”          3 minutes ago       Up 3 minutes                            nova-50f22f8a-e6ff-4b8b-8c15-f3b9bbd1aad2
a0826911eabe        rastasheep/ubuntu-sshd     “/usr/sbin/sshd -D”      About an hour ago   Up About an hour                        nova-c8284258-f9c0-4b81-8cd0-db6e7cbf8d48
7923487076d5        larsks/thttpd              “/thttpd -D -l /dev/s”   About an hour ago   Up About an hour                        nova-03664d5e-f3c5-4ebb-9109-e96189150626


Attempt to set up HAProxy/Keepalived 3 Node Controller on RDO Liberty per Javier Pena

November 18, 2015

URGENT UPDATE 11/18/2015
Please, view https://github.com/beekhof/osp-ha-deploy/commit/b2e01e86ca93cfad9ad01d533b386b4c9607c60d
It looks like work in progress.
See also https://www.redhat.com/archives/rdo-list/2015-November/msg00168.html
END UPDATE

Actually, the setup below closely follows https://github.com/beekhof/osp-ha-deploy/blob/master/HA-keepalived.md

To the best of my knowledge, Cisco's schema has been implemented :-
Keepalived, HAProxy, Galera for MySQL, manual install, at least 3 controller nodes. I just highlight several steps which, I believe, allowed me to bring this work to success. Javier uses a flat external network provider for the Controller cluster, disabling NetworkManager and enabling the network service from the very start; there is one step which I was unable to skip. It is disabling the IPs on the eth0 interfaces and restarting the network service right before running `ovs-vsctl add-port br-eth0 eth0`, per the Neutron building instructions of the mentioned "Howto", which seems to be one of the best I have ever seen.

I guess that, due to this sequence of steps, the external network is still pingable even on a three node Controller cluster that has already been built and seems to run OK :-

However, had I disabled the eth0 IPs from the start, I would have lost connectivity right away when switching from NetworkManager to the network service. In general, the external network is supposed to be pingable from the qrouter namespace due to the Neutron router's DNAT/SNAT iptables forwarding, but not from the Controller. I am also aware that when an Ethernet interface becomes an OVS port of an OVS bridge, its IP is supposed to be suppressed. When an external network provider is not used, br-ex gets an available IP on the external network. Using an external network provider changes the situation. Details may be seen here :-

https://www.linux.com/community/blogs/133-general-linux/858156-multiple-external-networks-with-a-single-l3-agent-testing-on-rdo-liberty-per-lars-kellogg-stedman

[root@hacontroller1 ~(keystone_admin)]# systemctl status NetworkManager
NetworkManager.service – Network Manager
Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; disabled)
Active: inactive (dead)

[root@hacontroller1 ~(keystone_admin)]# systemctl status network
network.service – LSB: Bring up/down networking
Loaded: loaded (/etc/rc.d/init.d/network)
Active: active (exited) since Wed 2015-11-18 08:36:53 MSK; 2h 10min ago
Process: 708 ExecStart=/etc/rc.d/init.d/network start (code=exited, status=0/SUCCESS)

Nov 18 08:36:47 hacontroller1.example.com network[708]: Bringing up loopback interface:  [  OK  ]
Nov 18 08:36:51 hacontroller1.example.com network[708]: Bringing up interface eth0:  [  OK  ]
Nov 18 08:36:53 hacontroller1.example.com network[708]: Bringing up interface eth1:  [  OK  ]
Nov 18 08:36:53 hacontroller1.example.com systemd[1]: Started LSB: Bring up/down networking.

[root@hacontroller1 ~(keystone_admin)]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
inet6 fe80::5054:ff:fe6d:926a  prefixlen 64  scopeid 0x20<link>
ether 52:54:00:6d:92:6a  txqueuelen 1000  (Ethernet)
RX packets 5036  bytes 730778 (713.6 KiB)
RX errors 0  dropped 12  overruns 0  frame 0
TX packets 15715  bytes 930045 (908.2 KiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
inet 192.169.142.221  netmask 255.255.255.0  broadcast 192.169.142.255
inet6 fe80::5054:ff:fe5e:9644  prefixlen 64  scopeid 0x20<link>
ether 52:54:00:5e:96:44  txqueuelen 1000  (Ethernet)
RX packets 1828396  bytes 283908183 (270.7 MiB)
RX errors 0  dropped 13  overruns 0  frame 0
TX packets 1839312  bytes 282429736 (269.3 MiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
inet 127.0.0.1  netmask 255.0.0.0
inet6 ::1  prefixlen 128  scopeid 0x10<host>
loop  txqueuelen 0  (Local Loopback)
RX packets 869067  bytes 69567890 (66.3 MiB)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 869067  bytes 69567890 (66.3 MiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@hacontroller1 ~(keystone_admin)]# ping -c 3  10.10.10.1
PING 10.10.10.1 (10.10.10.1) 56(84) bytes of data.
64 bytes from 10.10.10.1: icmp_seq=1 ttl=64 time=2.04 ms
64 bytes from 10.10.10.1: icmp_seq=2 ttl=64 time=0.103 ms
64 bytes from 10.10.10.1: icmp_seq=3 ttl=64 time=0.118 ms

— 10.10.10.1 ping statistics —

3 packets transmitted, 3 received, 0% packet loss, time 2001ms

rtt min/avg/max/mdev = 0.103/0.754/2.043/0.911 ms

 

Both the management and external networks are emulated by corresponding Libvirt networks
on the F23 Virtualization Server. Four VMs have been set up in total, 3 of them for Controller nodes and one for Compute (4 VCPUS, 4 GB RAM).

[root@fedora23wks ~]# cat openstackvms.xml ( for eth1's )

<network>
<name>openstackvms</name>
<uuid>d0e9964a-f91a-40c0-b769-a609aee41bf2</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr1' stp='on' delay='0' />
<mac address='52:54:00:60:f8:6d'/>
<ip address='192.169.142.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.169.142.2' end='192.169.142.254' />
</dhcp>
</ip>
</network>

[root@fedora23wks ~]# cat public.xml ( for external network provider )

<network>
<name>public</name>
<uuid>d0e9965b-f92c-40c1-b749-b609aed42cf2</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr2' stp='on' delay='0' />
<mac address='52:54:00:60:f8:6d'/>
<ip address='10.10.10.1' netmask='255.255.255.0'>
<dhcp>
<range start='10.10.10.2' end='10.10.10.254' />
</dhcp>
</ip>
</network>

Only one file is a bit different on the Controller Nodes; it is l3_agent.ini, with external_network_bridge left empty for the external network provider (a net-create sketch follows the listing).

[root@hacontroller1 neutron(keystone_demo)]# cat l3_agent.ini | grep -v ^# | grep -v ^$

[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
handle_internal_only_routers = True
send_arp_for_ha = 3
metadata_ip = controller-vip.example.com
external_network_bridge =
gateway_external_network_id =
[AGENT]
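With external_network_bridge left empty, the external flat network is created through the provider extension. A rough sketch only: the network/subnet names are mine, and it assumes a bridge mapping like physnet1:br-eth0 in the ML2/OVS agent configuration on the controllers:

# neutron net-create ext_net --provider:network_type flat --provider:physical_network physnet1 --router:external
# neutron subnet-create ext_net 10.10.10.0/24 --name ext_subnet --disable-dhcp --gateway 10.10.10.1 --allocation-pool start=10.10.10.100,end=10.10.10.200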

*************************************************************************************
As noted in the "UPDATE" posted at the top of this blog entry, in the meantime a perfect solution has been provided by
https://github.com/beekhof/osp-ha-deploy/commit/b2e01e86ca93cfad9ad01d533b386b4c9607c60d
The commit was made on 11/14/2015, right after the discussion on the RDO mailing list.
*************************************************************************************

One more step which I did (not sure it really has
to be done at this point in time): the IPs on the eth0 interfaces
were disabled just before running `ovs-vsctl add-port br-eth0 eth0`
(see the sketch after this list):-

1. Update the ifcfg-eth0 files on all Controllers
2. `service network restart` on all Controllers
3. `ovs-vsctl add-port br-eth0 eth0` on all Controllers
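A sketch of these three steps, to be run on every Controller; it assumes a static address was configured in ifcfg-eth0 (remove the address lines by hand if your file differs):

# cd /etc/sysconfig/network-scripts
# sed -i -e 's/^BOOTPROTO=.*/BOOTPROTO=none/' -e '/^IPADDR/d' -e '/^NETMASK/d' -e '/^GATEWAY/d' ifcfg-eth0
# service network restart
# ovs-vsctl add-port br-eth0 eth0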

*****************************************************************************************
Targeting just a POC (to get floating IPs accessible from the Fedora 23 Virtualization host) resulted in the following Controller cluster setup:-
*****************************************************************************************

I installed only

Keystone
Glance
Neutron
Nova
Horizon

**************************
UPDATE to official docs
**************************
[root@hacontroller1 ~(keystone_admin)]# cat   keystonerc_admin
export OS_USERNAME=admin
export OS_TENANT_NAME=admin
export OS_PROJECT_NAME=admin
export OS_REGION_NAME=regionOne
export OS_PASSWORD=keystonetest
export OS_AUTH_URL=http://controller-vip.example.com:35357/v2.0/
export OS_SERVICE_ENDPOINT=http://controller-vip.example.com:35357/v2.0
export OS_SERVICE_TOKEN=2fbe298b385e132da335
export PS1='[\u@\h \W(keystone_admin)]\$ '

Due to the Galera synchronous multi-master replication running between the Controllers, commands like :-

# su keystone -s /bin/sh -c "keystone-manage db_sync"
# su glance -s /bin/sh -c "glance-manage db_sync"
# neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
# su nova -s /bin/sh -c "nova-manage db sync"

are supposed to be run just once, from Controller node 1 ( for instance ).

************************
Compute Node setup:-
*************************

Compute setup

**********************
On all nodes
**********************

[root@hacontroller1 neutron(keystone_demo)]# cat /etc/hosts
192.169.142.220 controller-vip.example.com controller-vip
192.169.142.221 hacontroller1.example.com hacontroller1
192.169.142.222 hacontroller2.example.com hacontroller2
192.169.142.223 hacontroller3.example.com hacontroller3
192.169.142.224 compute.example.com compute
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

[root@hacontroller1 ~(keystone_admin)]# cat /etc/neutron/neutron.conf | grep -v ^$| grep -v ^#

[DEFAULT]
bind_host = 192.169.142.22(X)
auth_strategy = keystone
notification_driver = neutron.openstack.common.notifier.rpc_notifier
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
service_plugins = router,lbaas
router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
dhcp_agents_per_network = 2
api_workers = 2
rpc_workers = 2
l3_ha = True
min_l3_agents_per_router = 2
max_l3_agents_per_router = 2

[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
[keystone_authtoken]
auth_uri = http://controller-vip.example.com:5000/
identity_uri = http://127.0.0.1:5000
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
auth_plugin = password
auth_url = http://controller-vip.example.com:35357/
username = neutron
password = neutrontest
project_name = services
[database]
connection = mysql://neutron:neutrontest@controller-vip.example.com:3306/neutron
max_retries = -1
[nova]
nova_region_name = regionOne
project_domain_id = default
project_name = services
user_domain_id = default
password = novatest
username = compute
auth_url = http://controller-vip.example.com:35357/
auth_plugin = password
[oslo_concurrency]
[oslo_policy]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_hosts = hacontroller1,hacontroller2,hacontroller3
rabbit_ha_queues = true
[qos]

[root@hacontroller1 haproxy(keystone_demo)]# cat haproxy.cfg
global
daemon
stats socket /var/lib/haproxy/stats
defaults
mode tcp
maxconn 10000
timeout connect 5s
timeout client 30s
timeout server 30s
listen monitor
bind 192.169.142.220:9300
mode http
monitor-uri /status
stats enable
stats uri /admin
stats realm Haproxy\ Statistics
stats auth root:redhat
stats refresh 5s
frontend vip-db
bind 192.169.142.220:3306
timeout client 90m
default_backend db-vms-galera
backend db-vms-galera
option httpchk
stick-table type ip size 1000
stick on dst
timeout server 90m
server rhos8-node1 192.169.142.221:3306 check inter 1s port 9200 backup on-marked-down shutdown-sessions
server rhos8-node2 192.169.142.222:3306 check inter 1s port 9200 backup on-marked-down shutdown-sessions
server rhos8-node3 192.169.142.223:3306 check inter 1s port 9200 backup on-marked-down shutdown-sessions
# Note the RabbitMQ entry is only needed for CloudForms compatibility
# and should be removed in the future
frontend vip-rabbitmq
option clitcpka
bind 192.169.142.220:5672
timeout client 900m
default_backend rabbitmq-vms
backend rabbitmq-vms
option srvtcpka
balance roundrobin
timeout server 900m
server rhos8-node1 192.169.142.221:5672 check inter 1s
server rhos8-node2 192.169.142.222:5672 check inter 1s
server rhos8-node3 192.169.142.223:5672 check inter 1s
frontend vip-keystone-admin
bind 192.169.142.220:35357
default_backend keystone-admin-vms
timeout client 600s
backend keystone-admin-vms
balance roundrobin
timeout server 600s
server rhos8-node1 192.169.142.221:35357 check inter 1s on-marked-down shutdown-sessions
server rhos8-node2 192.169.142.222:35357 check inter 1s on-marked-down shutdown-sessions
server rhos8-node3 192.169.142.223:35357 check inter 1s on-marked-down shutdown-sessions
frontend vip-keystone-public
bind 192.169.142.220:5000
default_backend keystone-public-vms
timeout client 600s
backend keystone-public-vms
balance roundrobin
timeout server 600s
server rhos8-node1 192.169.142.221:5000 check inter 1s on-marked-down shutdown-sessions
server rhos8-node2 192.169.142.222:5000 check inter 1s on-marked-down shutdown-sessions
server rhos8-node3 192.169.142.223:5000 check inter 1s on-marked-down shutdown-sessions
frontend vip-glance-api
bind 192.169.142.220:9191
default_backend glance-api-vms
backend glance-api-vms
balance roundrobin
server rhos8-node1 192.169.142.221:9191 check inter 1s
server rhos8-node2 192.169.142.222:9191 check inter 1s
server rhos8-node3 192.169.142.223:9191 check inter 1s
frontend vip-glance-registry
bind 192.169.142.220:9292
default_backend glance-registry-vms
backend glance-registry-vms
balance roundrobin
server rhos8-node1 192.169.142.221:9292 check inter 1s
server rhos8-node2 192.169.142.222:9292 check inter 1s
server rhos8-node3 192.169.142.223:9292 check inter 1s
frontend vip-cinder
bind 192.169.142.220:8776
default_backend cinder-vms
backend cinder-vms
balance roundrobin
server rhos8-node1 192.169.142.221:8776 check inter 1s
server rhos8-node2 192.169.142.222:8776 check inter 1s
server rhos8-node3 192.169.142.223:8776 check inter 1s
frontend vip-swift
bind 192.169.142.220:8080
default_backend swift-vms
backend swift-vms
balance roundrobin
server rhos8-node1 192.169.142.221:8080 check inter 1s
server rhos8-node2 192.169.142.222:8080 check inter 1s
server rhos8-node3 192.169.142.223:8080 check inter 1s
frontend vip-neutron
bind 192.169.142.220:9696
default_backend neutron-vms
backend neutron-vms
balance roundrobin
server rhos8-node1 192.169.142.221:9696 check inter 1s
server rhos8-node2 192.169.142.222:9696 check inter 1s
server rhos8-node3 192.169.142.223:9696 check inter 1s
frontend vip-nova-vnc-novncproxy
bind 192.169.142.220:6080
default_backend nova-vnc-novncproxy-vms
backend nova-vnc-novncproxy-vms
balance roundrobin
timeout tunnel 1h
server rhos8-node1 192.169.142.221:6080 check inter 1s
server rhos8-node2 192.169.142.222:6080 check inter 1s
server rhos8-node3 192.169.142.223:6080 check inter 1s
frontend nova-metadata-vms
bind 192.169.142.220:8775
default_backend nova-metadata-vms
backend nova-metadata-vms
balance roundrobin
server rhos8-node1 192.169.142.221:8775 check inter 1s
server rhos8-node2 192.169.142.222:8775 check inter 1s
server rhos8-node3 192.169.142.223:8775 check inter 1s
frontend vip-nova-api
bind 192.169.142.220:8774
default_backend nova-api-vms
backend nova-api-vms
balance roundrobin
server rhos8-node1 192.169.142.221:8774 check inter 1s
server rhos8-node2 192.169.142.222:8774 check inter 1s
server rhos8-node3 192.169.142.223:8774 check inter 1s
frontend vip-horizon
bind 192.169.142.220:80
timeout client 180s
default_backend horizon-vms
backend horizon-vms
balance roundrobin
timeout server 180s
mode http
cookie SERVERID insert indirect nocache
server rhos8-node1 192.169.142.221:80 check inter 1s cookie rhos8-horizon1 on-marked-down shutdown-sessions
server rhos8-node2 192.169.142.222:80 check inter 1s cookie rhos8-horizon2 on-marked-down shutdown-sessions
server rhos8-node3 192.169.142.223:80 check inter 1s cookie rhos8-horizon3 on-marked-down shutdown-sessions
frontend vip-heat-cfn
bind 192.169.142.220:8000
default_backend heat-cfn-vms
backend heat-cfn-vms
balance roundrobin
server rhos8-node1 192.169.142.221:8000 check inter 1s
server rhos8-node2 192.169.142.222:8000 check inter 1s
server rhos8-node3 192.169.142.223:8000 check inter 1s
frontend vip-heat-cloudw
bind 192.169.142.220:8003
default_backend heat-cloudw-vms
backend heat-cloudw-vms
balance roundrobin
server rhos8-node1 192.169.142.221:8003 check inter 1s
server rhos8-node2 192.169.142.222:8003 check inter 1s
server rhos8-node3 192.169.142.223:8003 check inter 1s
frontend vip-heat-srv
bind 192.169.142.220:8004
default_backend heat-srv-vms
backend heat-srv-vms
balance roundrobin
server rhos8-node1 192.169.142.221:8004 check inter 1s
server rhos8-node2 192.169.142.222:8004 check inter 1s
server rhos8-node3 192.169.142.223:8004 check inter 1s
frontend vip-ceilometer
bind 192.169.142.220:8777
timeout client 90s
default_backend ceilometer-vms
backend ceilometer-vms
balance roundrobin
timeout server 90s
server rhos8-node1 192.169.142.221:8777 check inter 1s
server rhos8-node2 192.169.142.222:8777 check inter 1s
server rhos8-node3 192.169.142.223:8777 check inter 1s
frontend vip-sahara
bind 192.169.142.220:8386
default_backend sahara-vms
backend sahara-vms
balance roundrobin
server rhos8-node1 192.169.142.221:8386 check inter 1s
server rhos8-node2 192.169.142.222:8386 check inter 1s
server rhos8-node3 192.169.142.223:8386 check inter 1s
frontend vip-trove
bind 192.169.142.220:8779
default_backend trove-vms
backend trove-vms
balance roundrobin
server rhos8-node1 192.169.142.221:8779 check inter 1s
server rhos8-node2 192.169.142.222:8779 check inter 1s
server rhos8-node3 192.169.142.223:8779 check inter 1s

[root@hacontroller1 ~(keystone_demo)]# cat /etc/my.cnf.d/galera.cnf
[mysqld]
skip-name-resolve=1
binlog_format=ROW
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
innodb_locks_unsafe_for_binlog=1
max_connections=8192
query_cache_size=0
query_cache_type=0
bind_address=192.169.142.22(X)
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_name="galera_cluster"
wsrep_cluster_address="gcomm://192.169.142.221,192.169.142.222,192.169.142.223"
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0
wsrep_notify_cmd=
wsrep_sst_method=rsync

[root@hacontroller1 ~(keystone_demo)]# cat /etc/keepalived/keepalived.conf
vrrp_script chk_haproxy {
script "/usr/bin/killall -0 haproxy"
interval 2
}
vrrp_instance VI_PUBLIC {
interface eth1
state BACKUP
virtual_router_id 52
priority 101
virtual_ipaddress {
192.169.142.220 dev eth1
}
track_script {
chk_haproxy
}
# Avoid failback
nopreempt
}
vrrp_sync_group VG1
group {
VI_PUBLIC
}
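A quick way to check keepalived and see which node currently holds the VIP (plain commands, nothing beyond this setup assumed):

# systemctl status keepalived
# ip addr show eth1 | grep 192.169.142.220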

*************************************************************************
The most difficult procedure is re-syncing the Galera MariaDB cluster
*************************************************************************

https://github.com/beekhof/osp-ha-deploy/blob/master/keepalived/galera-bootstrap.md

Because the nova services start without waiting for the Galera databases to get in sync, a restart via
`openstack-service restart nova` is required on every Controller after the sync is done, regardless of the fact that systemctl reports the services as up and running (a verification sketch follows). Also, the most likely reason for VMs failing to reach the Nova metadata server at boot is a failure to start the neutron-l3-agent service on each Controller: by classical design, VMs access metadata via the neutron-ns-metadata-proxy running in the qrouter namespace. The neutron-l3-agents may be started with no problems, sometimes just restarted when needed.
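A sketch of that check-and-restart loop on each Controller; it assumes the MariaDB root account is reachable locally (add -p if a password is set):

# mysql -u root -e "SHOW STATUS LIKE 'wsrep_cluster_size';"
# mysql -u root -e "SHOW STATUS LIKE 'wsrep_local_state_comment';"
# openstack-service restart nova
# openstack-service status nova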

Runtime snapshots. Keepalived status on the Controller nodes

HA Neutron router belonging to tenant demo, created via the Neutron CLI

***********************************************************************

 At this point hacontroller1 goes down. On hacontroller2 run :-

***********************************************************************

[root@hacontroller2 ~(keystone_admin)]# neutron l3-agent-list-hosting-router RouterHA

+————————————–+—————————+—————-+——-+———-+

| id                                   | host                      | admin_state_up | alive | ha_state |

+————————————–+—————————+—————-+——-+———-+

| a03409d2-fbe9-492c-a954-e1bdf7627491 | hacontroller2.example.com | True           | :-)   | active   |

| 0d6e658a-e796-4cff-962f-06e455fce02f | hacontroller1.example.com | True           | xxx   | active   |

+————————————–+—————————+—————-+——-+——-

***********************************************************************

 At this point hacontroller2 goes down. hacontroller1 goes up :-

***********************************************************************

Nova Services status on all Controllers

Neutron Services status on all Controllers

Compute Node status

******************************************************************************
Cloud VM (L3) at runtime. Accessibility from the F23 Virtualization Host,
which runs the HA 3 node Controller and Compute Node VMs (L2)
******************************************************************************

[root@fedora23wks ~]# ping  10.10.10.103

PING 10.10.10.103 (10.10.10.103) 56(84) bytes of data.
64 bytes from 10.10.10.103: icmp_seq=1 ttl=63 time=1.14 ms
64 bytes from 10.10.10.103: icmp_seq=2 ttl=63 time=0.813 ms
64 bytes from 10.10.10.103: icmp_seq=3 ttl=63 time=0.636 ms
64 bytes from 10.10.10.103: icmp_seq=4 ttl=63 time=0.778 ms
64 bytes from 10.10.10.103: icmp_seq=5 ttl=63 time=0.493 ms
^C

— 10.10.10.103 ping statistics —

5 packets transmitted, 5 received, 0% packet loss, time 4001ms

rtt min/avg/max/mdev = 0.493/0.773/1.146/0.218 ms

[root@fedora23wks ~]# ssh -i oskey1.priv fedora@10.10.10.103
Last login: Tue Nov 17 09:02:30 2015
[fedora@vf23dev ~]$ uname -a
Linux vf23dev.novalocal 4.2.5-300.fc23.x86_64 #1 SMP Tue Oct 27 04:29:56 UTC 2015 x86_64 x86_64 x86_64 GNU/Linux

********************************************************************************
Verifying the neutron workflow on the 3 node controller cluster built via the patch:-
********************************************************************************

[root@hacontroller1 ~(keystone_admin)]# ovs-ofctl show br-eth0
OFPT_FEATURES_REPLY (xid=0x2): dpid:0000baf0db1a854f
n_tables:254, n_buffers:256
capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP
actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst
1(eth0): addr:52:54:00:aa:0e:fc
config:     0
state:      0
speed: 0 Mbps now, 0 Mbps max
2(phy-br-eth0): addr:46:c0:e0:30:72:92
config:     0
state:      0
speed: 0 Mbps now, 0 Mbps max
LOCAL(br-eth0): addr:ba:f0:db:1a:85:4f
config:     0
state:      0
speed: 0 Mbps now, 0 Mbps max
OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0

[root@hacontroller1 ~(keystone_admin)]# ovs-ofctl dump-flows  br-eth0
NXST_FLOW reply (xid=0x4):
cookie=0x0, duration=15577.057s, table=0, n_packets=50441, n_bytes=3262529, idle_age=2, priority=4,in_port=2,dl_vlan=3 actions=strip_vlan,NORMAL
cookie=0x0, duration=15765.938s, table=0, n_packets=31225, n_bytes=1751795, idle_age=0, priority=2,in_port=2 actions=drop
cookie=0x0, duration=15765.974s, table=0, n_packets=39982, n_bytes=42838752, idle_age=1, priority=0 actions=NORMAL

Check `ovs-vsctl show`

Bridge br-int
fail_mode: secure
Port “tapc8488877-45”
tag: 4
Interface “tapc8488877-45”
type: internal
Port br-int
Interface br-int
type: internal
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port “tap14aa6eeb-70”
tag: 2
Interface “tap14aa6eeb-70”
type: internal
Port “qr-8f5b3f4a-45”
tag: 2
Interface “qr-8f5b3f4a-45”
type: internal
Port “int-br-eth0”
Interface “int-br-eth0″
type: patch
options: {peer=”phy-br-eth0”}
Port “qg-34893aa0-17”
tag: 3

[root@hacontroller2 ~(keystone_demo)]# ovs-ofctl show  br-eth0
OFPT_FEATURES_REPLY (xid=0x2): dpid:0000b6bfa2bafd45
n_tables:254, n_buffers:256
capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP
actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst
1(eth0): addr:52:54:00:73:df:29
config:     0
state:      0
speed: 0 Mbps now, 0 Mbps max
2(phy-br-eth0): addr:be:89:61:87:56:20
config:     0
state:      0
speed: 0 Mbps now, 0 Mbps max
LOCAL(br-eth0): addr:b6:bf:a2:ba:fd:45
config:     0
state:      0
speed: 0 Mbps now, 0 Mbps max
OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0

[root@hacontroller2 ~(keystone_demo)]# ovs-ofctl dump-flows  br-eth0
NXST_FLOW reply (xid=0x4):
cookie=0x0, duration=15810.746s, table=0, n_packets=0, n_bytes=0, idle_age=15810, priority=4,in_port=2,dl_vlan=2 actions=strip_vlan,NORMAL
cookie=0x0, duration=16105.662s, table=0, n_packets=31849, n_bytes=1786827, idle_age=0, priority=2,in_port=2 actions=drop
cookie=0x0, duration=16105.696s, table=0, n_packets=39762, n_bytes=2100763, idle_age=0, priority=0 actions=NORMAL

Check `ovs-vsctl show`
Bridge br-int
fail_mode: secure
Port “qg-34893aa0-17”
tag: 2
Interface “qg-34893aa0-17”
type: internal


RDO Liberty Set up for three Nodes (Controller+Network+Compute) ML2&OVS&VXLAN on CentOS 7.1

October 22, 2015

As advertised officially

In addition to the comprehensive OpenStack services, libraries and clients, this release also provides Packstack, a simple installer for proof-of-concept installations, as small as a single all-in-one box and RDO Manager an OpenStack deployment and management tool for production environments based on the OpenStack TripleO project

In the posting below I intend to test packstack on Liberty to perform a classic three node deployment. If packstack succeeds, then post-installation actions like VRRP or DVR setups might be committed as well. One of the real problems for packstack is HA Controller setup. Here RDO Manager is supposed to have a significant advantage, replacing a lot of manual configuration with a comprehensive CLI.

Following below is a brief instruction for a three node deployment test (Controller && Network && Compute) for RDO Liberty, performed on a Fedora 22 host with the KVM/Libvirt hypervisor (16 GB RAM, Intel Core i7-4790 Haswell CPU, ASUS Z97-P). Three VMs (4 GB RAM, 4 VCPUS) have been set up: the Controller VM with one VNIC (management subnet), the Network Node VM with three VNICs (management, vteps and external subnets), the Compute Node VM with two VNICs (management and vteps subnets).

SELINUX stays in enforcing mode.

I avoid using the default libvirt subnet 192.168.122.0/24 for any purpose related to the VMs serving as RDO Liberty nodes; for some reason it causes network congestion when forwarding packets to the Internet and vice versa.

Three Libvirt networks created

# cat openstackvms.xml
<network>
<name>openstackvms</name>
<uuid>d0e9964a-f91a-40c0-b769-a609aee41bf2</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr1' stp='on' delay='0' />
<mac address='52:54:00:60:f8:6d'/>
<ip address='192.169.142.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.169.142.2' end='192.169.142.254' />
</dhcp>
</ip>
</network>

[root@vfedora22wks ~]# cat public.xml
<network>
<name>public</name>
<uuid>d1e9965b-f92c-40c1-b749-b609aed42cf2</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr2' stp='on' delay='0' />
<mac address='52:54:00:60:f8:6d'/>
<ip address='172.24.4.225' netmask='255.255.255.240'>
<dhcp>
<range start='172.24.4.226' end='172.24.4.238' />
</dhcp>
</ip>
</network>

[root@vfedora22wks ~]# cat vteps.xml
<network>
<name>vteps</name>
<uuid>d2e9965b-f92c-40c1-b749-b609aed42cf2</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr3' stp='on' delay='0' />
<mac address='52:54:00:60:f8:6d'/>
<ip address='10.0.0.1' netmask='255.255.255.0'>
<dhcp>
<range start='10.0.0.1' end='10.0.0.254' />
</dhcp>
</ip>
</network>

# virsh net-list
Name                 State      Autostart     Persistent
————————————————————————–
default               active        yes           yes
openstackvms     active        yes           yes
public                active        yes           yes
vteps                 active         yes          yes

*********************************************************************************
1. The first Libvirt subnet "openstackvms" serves as the management network.
All 3 VMs are attached to this subnet.
**********************************************************************************
2. The second Libvirt subnet "public" serves to simulate the external network. The Network Node is attached to "public"; later on the "eth2" interface (which belongs to "public") is converted into an OVS port of br-ex on the Network Node. Via bridge virbr2 (172.24.4.225) this Libvirt subnet provides the VMs running on the Compute Node with access to the Internet, because it matches the external network 172.24.4.224/28 created by the packstack installation.
***********************************************************************************
3. The third Libvirt subnet "vteps" serves for VTEP endpoint simulation. The Network and Compute Node VMs are attached to this subnet. The networks are defined and started with virsh as sketched below.
***********************************************************************************
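The XML files above are loaded the usual libvirt way; the same three commands are repeated for public.xml and vteps.xml:

# virsh net-define openstackvms.xml
# virsh net-autostart openstackvms
# virsh net-start openstackvms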

*********************
Answer-file :-
*********************

[root@ip-192-169-142-127 ~(keystone_admin)]# cat answer3Nodet.txt
[general]
CONFIG_SSH_KEY=/root/.ssh/id_rsa.pub
CONFIG_DEFAULT_PASSWORD=
CONFIG_MARIADB_INSTALL=y
CONFIG_GLANCE_INSTALL=y
CONFIG_CINDER_INSTALL=y
CONFIG_NOVA_INSTALL=y
CONFIG_NEUTRON_INSTALL=y
CONFIG_HORIZON_INSTALL=y
CONFIG_SWIFT_INSTALL=y
CONFIG_CEILOMETER_INSTALL=y
CONFIG_HEAT_INSTALL=n
CONFIG_CLIENT_INSTALL=y
CONFIG_NTP_SERVERS=
CONFIG_NAGIOS_INSTALL=y
EXCLUDE_SERVERS=
CONFIG_DEBUG_MODE=n
CONFIG_CONTROLLER_HOST=192.169.142.127
CONFIG_COMPUTE_HOSTS=192.169.142.137
# In case of two Compute nodes
# CONFIG_COMPUTE_HOSTS=192.169.142.137,192.169.142.157
CONFIG_NETWORK_HOSTS=192.169.142.147
CONFIG_VMWARE_BACKEND=n
CONFIG_UNSUPPORTED=n
CONFIG_VCENTER_HOST=
CONFIG_VCENTER_USER=
CONFIG_VCENTER_PASSWORD=
CONFIG_VCENTER_CLUSTER_NAME=
CONFIG_STORAGE_HOST=192.169.142.127
CONFIG_USE_EPEL=y
CONFIG_REPO=
CONFIG_RH_USER=
CONFIG_SATELLITE_URL=
CONFIG_RH_PW=
CONFIG_RH_OPTIONAL=y
CONFIG_RH_PROXY=
CONFIG_RH_PROXY_PORT=
CONFIG_RH_PROXY_USER=
CONFIG_RH_PROXY_PW=
CONFIG_SATELLITE_USER=
CONFIG_SATELLITE_PW=
CONFIG_SATELLITE_AKEY=
CONFIG_SATELLITE_CACERT=
CONFIG_SATELLITE_PROFILE=
CONFIG_SATELLITE_FLAGS=
CONFIG_SATELLITE_PROXY=
CONFIG_SATELLITE_PROXY_USER=
CONFIG_SATELLITE_PROXY_PW=
CONFIG_AMQP_BACKEND=rabbitmq
CONFIG_AMQP_HOST=192.169.142.127
CONFIG_AMQP_ENABLE_SSL=n
CONFIG_AMQP_ENABLE_AUTH=n
CONFIG_AMQP_NSS_CERTDB_PW=PW_PLACEHOLDER
CONFIG_AMQP_SSL_PORT=5671
CONFIG_AMQP_SSL_CERT_FILE=/etc/pki/tls/certs/amqp_selfcert.pem
CONFIG_AMQP_SSL_KEY_FILE=/etc/pki/tls/private/amqp_selfkey.pem
CONFIG_AMQP_SSL_SELF_SIGNED=y
CONFIG_AMQP_AUTH_USER=amqp_user
CONFIG_AMQP_AUTH_PASSWORD=PW_PLACEHOLDER
CONFIG_MARIADB_HOST=192.169.142.127
CONFIG_MARIADB_USER=root
CONFIG_MARIADB_PW=7207ae344ed04957
CONFIG_KEYSTONE_DB_PW=abcae16b785245c3
CONFIG_KEYSTONE_REGION=RegionOne
CONFIG_KEYSTONE_ADMIN_TOKEN=3ad2de159f9649afb0c342ba57e637d9
CONFIG_KEYSTONE_ADMIN_PW=7049f834927e4468
CONFIG_KEYSTONE_DEMO_PW=bf737b785cfa4398
CONFIG_KEYSTONE_TOKEN_FORMAT=UUID
CONFIG_KEYSTONE_SERVICE_NAME=httpd
CONFIG_GLANCE_DB_PW=41264fc52ffd4fe8
CONFIG_GLANCE_KS_PW=f6a9398960534797
CONFIG_GLANCE_BACKEND=file
CONFIG_CINDER_DB_PW=5ac08c6d09ba4b69
CONFIG_CINDER_KS_PW=c8cb1ecb8c2b4f6f
CONFIG_CINDER_BACKEND=lvm
CONFIG_CINDER_VOLUMES_CREATE=y
CONFIG_CINDER_VOLUMES_SIZE=10G
CONFIG_CINDER_GLUSTER_MOUNTS=
CONFIG_CINDER_NFS_MOUNTS=
CONFIG_CINDER_NETAPP_LOGIN=
CONFIG_CINDER_NETAPP_PASSWORD=
CONFIG_CINDER_NETAPP_HOSTNAME=
CONFIG_CINDER_NETAPP_SERVER_PORT=80
CONFIG_CINDER_NETAPP_STORAGE_FAMILY=ontap_cluster
CONFIG_CINDER_NETAPP_TRANSPORT_TYPE=http
CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL=nfs
CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER=1.0
CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES=720
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START=20
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP=60
CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG=
CONFIG_CINDER_NETAPP_VOLUME_LIST=
CONFIG_CINDER_NETAPP_VFILER=
CONFIG_CINDER_NETAPP_VSERVER=
CONFIG_CINDER_NETAPP_CONTROLLER_IPS=
CONFIG_CINDER_NETAPP_SA_PASSWORD=
CONFIG_CINDER_NETAPP_WEBSERVICE_PATH=/devmgr/v2
CONFIG_CINDER_NETAPP_STORAGE_POOLS=
CONFIG_NOVA_DB_PW=1e1b5aeeeaf342a8
CONFIG_NOVA_KS_PW=d9583177a2444f06
CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=16.0
CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5
CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL=tcp
CONFIG_NOVA_COMPUTE_PRIVIF=eth1
CONFIG_NOVA_NETWORK_MANAGER=nova.network.manager.FlatDHCPManager
CONFIG_NOVA_NETWORK_PUBIF=eth0
CONFIG_NOVA_NETWORK_PRIVIF=eth1
CONFIG_NOVA_NETWORK_FIXEDRANGE=192.168.32.0/22
CONFIG_NOVA_NETWORK_FLOATRANGE=10.3.4.0/22
CONFIG_NOVA_NETWORK_DEFAULTFLOATINGPOOL=nova
CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP=n
CONFIG_NOVA_NETWORK_VLAN_START=100
CONFIG_NOVA_NETWORK_NUMBER=1
CONFIG_NOVA_NETWORK_SIZE=255
CONFIG_NEUTRON_KS_PW=808e36e154bd4cee
CONFIG_NEUTRON_DB_PW=0e2b927a21b44737
CONFIG_NEUTRON_L3_EXT_BRIDGE=br-ex
CONFIG_NEUTRON_L2_PLUGIN=ml2
CONFIG_NEUTRON_METADATA_PW=a965cd23ed2f4502
CONFIG_LBAAS_INSTALL=n
CONFIG_NEUTRON_METERING_AGENT_INSTALL=n
CONFIG_NEUTRON_FWAAS=n
CONFIG_NEUTRON_ML2_TYPE_DRIVERS=vxlan
CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES=vxlan
CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS=openvswitch
CONFIG_NEUTRON_ML2_FLAT_NETWORKS=*
CONFIG_NEUTRON_ML2_VLAN_RANGES=
CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES=1001:2000
CONFIG_NEUTRON_ML2_VXLAN_GROUP=239.1.1.2
CONFIG_NEUTRON_ML2_VNI_RANGES=1001:2000
CONFIG_NEUTRON_L2_AGENT=openvswitch
CONFIG_NEUTRON_LB_TENANT_NETWORK_TYPE=local
CONFIG_NEUTRON_LB_VLAN_RANGES=
CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS=
CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE=vxlan
CONFIG_NEUTRON_OVS_VLAN_RANGES=
CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=physnet1:br-ex
CONFIG_NEUTRON_OVS_BRIDGE_IFACES=
CONFIG_NEUTRON_OVS_TUNNEL_RANGES=1001:2000
# This is the VXLAN tunnel endpoint interface.
# It must be assigned an IP address from the VTEP network
# before running packstack.
CONFIG_NEUTRON_OVS_TUNNEL_IF=eth1
CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT=4789
CONFIG_HORIZON_SSL=n
CONFIG_SSL_CERT=
CONFIG_SSL_KEY=
CONFIG_SSL_CACHAIN=
CONFIG_SWIFT_KS_PW=8f75bfd461234c30
CONFIG_SWIFT_STORAGES=
CONFIG_SWIFT_STORAGE_ZONES=1
CONFIG_SWIFT_STORAGE_REPLICAS=1
CONFIG_SWIFT_STORAGE_FSTYPE=ext4
CONFIG_SWIFT_HASH=a60aacbedde7429a
CONFIG_SWIFT_STORAGE_SIZE=2G
CONFIG_PROVISION_DEMO=y
CONFIG_PROVISION_TEMPEST=n
CONFIG_PROVISION_TEMPEST_USER=
CONFIG_PROVISION_TEMPEST_USER_PW=44faa4ebc3da4459
CONFIG_PROVISION_DEMO_FLOATRANGE=172.24.4.224/28
CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/openstack/tempest.git
CONFIG_PROVISION_TEMPEST_REPO_REVISION=master
CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE=n
CONFIG_HEAT_DB_PW=PW_PLACEHOLDER
CONFIG_HEAT_AUTH_ENC_KEY=fc3fb7fee61e46b0
CONFIG_HEAT_KS_PW=PW_PLACEHOLDER
CONFIG_HEAT_CLOUDWATCH_INSTALL=n
CONFIG_HEAT_USING_TRUSTS=y
CONFIG_HEAT_CFN_INSTALL=n
CONFIG_HEAT_DOMAIN=heat
CONFIG_HEAT_DOMAIN_ADMIN=heat_admin
CONFIG_HEAT_DOMAIN_PASSWORD=PW_PLACEHOLDER
CONFIG_CEILOMETER_SECRET=19ae0e7430174349
CONFIG_CEILOMETER_KS_PW=337b08d4b3a44753
CONFIG_MONGODB_HOST=192.169.142.127
CONFIG_NAGIOS_PW=02f168ee8edd44e4
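
For reference, the answer file above can be seeded with packstack defaults and then edited, and, per the comment on CONFIG_NEUTRON_OVS_TUNNEL_IF, eth1 on the Network and Compute nodes needs a VTEP address before packstack runs. A minimal sketch only; the 10.0.0.147/10.0.0.137 addresses match the VXLAN endpoints shown later in the ovs-vsctl output.

# packstack --gen-answer-file=./answer3Nodet.txt   # then edit the values shown above

# /etc/sysconfig/network-scripts/ifcfg-eth1 on the Network Node (use 10.0.0.137 on the Compute Node)
DEVICE=eth1
ONBOOT=yes
BOOTPROTO=static
IPADDR=10.0.0.147
NETMASK=255.255.255.0
NM_CONTROLLED=no
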
**************************************
At this point run on the Controller :-
**************************************
Keep SELINUX=enforcing ( RDO Liberty is supposed to handle this )
# yum -y install centos-release-openstack-liberty
# yum -y install openstack-packstack
# packstack --answer-file=./answer3Nodet.txt
**********************************************************************************
Upon packstack completion, create the following files on the Network Node,
designed to match the external network created by the installer
**********************************************************************************

[root@ip-192-169-142-147 network-scripts]# cat ifcfg-br-ex
DEVICE="br-ex"
BOOTPROTO="static"
IPADDR="172.24.4.232"
NETMASK="255.255.255.240"
DNS1="83.221.202.254"
BROADCAST="172.24.4.239"
GATEWAY="172.24.4.225"
NM_CONTROLLED="no"
TYPE="OVSIntPort"
OVS_BRIDGE=br-ex
DEVICETYPE="ovs"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="yes"
IPV6INIT=no

[root@ip-192-169-142-147 network-scripts]# cat ifcfg-eth2
DEVICE="eth2"
# HWADDR=00:22:15:63:E4:E2
ONBOOT="yes"
TYPE="OVSPort"
DEVICETYPE="ovs"
OVS_BRIDGE=br-ex
NM_CONTROLLED=no
IPV6INIT=no

*************************************************
Next steps to be performed on the Network Node :-
*************************************************

# chkconfig network on
# systemctl stop NetworkManager
# systemctl disable NetworkManager
# service network restart

The OVS port should be eth2 (the third Ethernet interface on the Network Node).
In a real deployment the libvirt bridge virbr2 is your router to the External
network. The OVS bridge br-ex should have an IP address belonging to the External network.
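
As a quick sanity check (not part of the original log), confirm that eth2 is attached to br-ex and that br-ex carries the external address configured in ifcfg-br-ex:

# ovs-vsctl list-ports br-ex        # expect: eth2 (plus phy-br-ex and, later, the qg-xxxx router port)
# ip addr show br-ex | grep inet    # expect: 172.24.4.232/28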

*******************
On Controller :-
*******************

[root@ip-192-169-142-127 ~(keystone_admin)]# netstat -lntp |  grep 35357
tcp6       0      0 :::35357                :::*                    LISTEN      7047/httpd

[root@ip-192-169-142-127 ~(keystone_admin)]# ps -ef | grep 7047
root      7047     1  0 11:22 ?        00:00:00 /usr/sbin/httpd -DFOREGROUND
keystone  7089  7047  0 11:22 ?        00:00:07 keystone-admin  -DFOREGROUND
keystone  7090  7047  0 11:22 ?        00:00:02 keystone-main   -DFOREGROUND
apache    7092  7047  0 11:22 ?        00:00:04 /usr/sbin/httpd -DFOREGROUND
apache    7093  7047  0 11:22 ?        00:00:04 /usr/sbin/httpd -DFOREGROUND
apache    7094  7047  0 11:22 ?        00:00:03 /usr/sbin/httpd -DFOREGROUND
apache    7095  7047  0 11:22 ?        00:00:00 /usr/sbin/httpd -DFOREGROUND
apache    7096  7047  0 11:22 ?        00:00:00 /usr/sbin/httpd -DFOREGROUND
apache    7097  7047  0 11:22 ?        00:00:00 /usr/sbin/httpd -DFOREGROUND
apache    7098  7047  0 11:22 ?        00:00:00 /usr/sbin/httpd -DFOREGROUND
apache    7099  7047  0 11:22 ?        00:00:00 /usr/sbin/httpd -DFOREGROUND
apache    7100  7047  0 11:22 ?        00:00:00 /usr/sbin/httpd -DFOREGROUND
apache    7101  7047  0 11:22 ?        00:00:00 /usr/sbin/httpd -DFOREGROUND
apache    7102  7047  0 11:22 ?        00:00:00 /usr/sbin/httpd -DFOREGROUND
root     28963 17739  0 12:51 pts/1    00:00:00 grep --color=auto 7047
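
Since the answer file sets CONFIG_KEYSTONE_SERVICE_NAME=httpd, Keystone runs as WSGI processes under Apache (the keystone-admin / keystone-main entries above). A hedged way to confirm both listeners answer; the exact JSON version payload is not reproduced here:

# curl -s http://192.169.142.127:35357/ | python -m json.tool | head   # admin API served by httpd
# curl -s http://192.169.142.127:5000/  | python -m json.tool | head   # public API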

********************
On Network Node
********************

[root@ip-192-169-142-147 ~(keystone_admin)]# neutron agent-list
+--------------------------------------+--------------------+----------------------------------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host                                   | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+----------------------------------------+-------+----------------+---------------------------+
| 217fb0f5-8dd1-4361-aae7-cc9a7d18d6e4 | Open vSwitch agent | ip-192-169-142-147.ip.secureserver.net | :-)   | True           | neutron-openvswitch-agent |
| 5dabfc17-db64-470c-9f01-8d2297d155f3 | L3 agent           | ip-192-169-142-147.ip.secureserver.net | :-)   | True           | neutron-l3-agent          |
| 5e3c6e2f-3f6d-4ede-b058-bc1b317d4ee1 | Open vSwitch agent | ip-192-169-142-137.ip.secureserver.net | :-)   | True           | neutron-openvswitch-agent |
| f0f02931-e7e6-4b01-8b87-46224cb71e6d | DHCP agent         | ip-192-169-142-147.ip.secureserver.net | :-)   | True           | neutron-dhcp-agent        |
| f16a5d9d-55e6-47c3-b509-ca445d05d34d | Metadata agent     | ip-192-169-142-147.ip.secureserver.net | :-)   | True           | neutron-metadata-agent    |
+--------------------------------------+--------------------+----------------------------------------+-------+----------------+---------------------------+

[root@ip-192-169-142-147 ~(keystone_admin)]# ovs-vsctl show
9221d1c1-008a-464a-ac26-1e0340407714
Bridge br-tun
fail_mode: secure
Port br-tun
Interface br-tun
type: internal
Port "vxlan-0a000089"
Interface "vxlan-0a000089"
type: vxlan
options: {df_default="true", in_key=flow, local_ip="10.0.0.147", out_key=flow, remote_ip="10.0.0.137"}
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Bridge br-ex
Port phy-br-ex
Interface phy-br-ex
type: patch
options: {peer=int-br-ex}
Port "eth2"
Interface "eth2"
Port "qg-1deeaf96-e8"
Interface "qg-1deeaf96-e8"
type: internal
Port br-ex
Interface br-ex
type: internal
Bridge br-int
fail_mode: secure
Port "qr-1909e3bb-fd"
tag: 2
Interface "qr-1909e3bb-fd"
type: internal
Port "tapfdf24cad-f8"
tag: 2
Interface "tapfdf24cad-f8"
type: internal
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port br-int
Interface br-int
type: internal
Port int-br-ex
Interface int-br-ex
type: patch
options: {peer=phy-br-ex}
ovs_version: "2.4.0"
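
The vxlan-0a000089 port name encodes the remote VTEP in hex (0a.00.00.89 = 10.0.0.137, the Compute node). A hedged check of the tunnel from the Network Node, using stock OVS tooling:

# ping -c 2 10.0.0.137                   # VTEP-to-VTEP reachability over eth1
# ovs-ofctl dump-flows br-tun | head -5  # flows programmed on the tunnel bridge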

[root@ip-192-169-142-147 ~(keystone_admin)]# dmesg | grep promisc
[    2.233302] device ovs-system entered promiscuous mode
[    2.273206] device br-int entered promiscuous mode
[    2.274981] device qr-838ad1f3-7d entered promiscuous mode
[    2.276333] device tap0f21eab4-db entered promiscuous mode
[    2.312740] device br-tun entered promiscuous mode
[    2.314509] device qg-2b712b60-d0 entered promiscuous mode
[    2.315921] device br-ex entered promiscuous mode
[    2.316022] device eth2 entered promiscuous mode
[   10.704329] device qr-838ad1f3-7d left promiscuous mode
[   10.729045] device tap0f21eab4-db left promiscuous mode
[   10.761844] device qg-2b712b60-d0 left promiscuous mode
[  224.746399] device eth2 left promiscuous mode
[  232.173791] device eth2 entered promiscuous mode
[  232.978909] device tap0f21eab4-db entered promiscuous mode
[  233.690854] device qr-838ad1f3-7d entered promiscuous mode
[  233.895213] device qg-2b712b60-d0 entered promiscuous mode
[ 1253.611501] device qr-838ad1f3-7d left promiscuous mode
[ 1254.017129] device qg-2b712b60-d0 left promiscuous mode
[ 1404.697825] device tapfdf24cad-f8 entered promiscuous mode
[ 1421.812107] device qr-1909e3bb-fd entered promiscuous mode
[ 1422.045593] device qg-1deeaf96-e8 entered promiscuous mode
[ 6111.042488] device tap0f21eab4-db left promiscuous mode

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-dd26c4ed-f757-416d-a772-64b503ffc497 ip route
default via 172.24.4.225 dev qg-1deeaf96-e8
50.0.0.0/24 dev qr-1909e3bb-fd  proto kernel  scope link  src 50.0.0.1
172.24.4.224/28 dev qg-1deeaf96-e8  proto kernel  scope link  src 172.24.4.227 
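
With the router namespace holding 172.24.4.227 on qg-1deeaf96-e8 and 50.0.0.1 on qr-1909e3bb-fd, both the external gateway and the tenant subnet should answer from inside the namespace. A hedged check (50.0.0.10 is the DHCP port shown in the qdhcp namespace below):

# ip netns exec qrouter-dd26c4ed-f757-416d-a772-64b503ffc497 ping -c 2 172.24.4.225   # external gateway
# ip netns exec qrouter-dd26c4ed-f757-416d-a772-64b503ffc497 ping -c 2 50.0.0.10      # DHCP port on the tenant net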

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-dd26c4ed-f757-416d-a772-64b503ffc497 ifconfig
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
inet 127.0.0.1  netmask 255.0.0.0
inet6 ::1  prefixlen 128  scopeid 0x10<host>
loop  txqueuelen 0  (Local Loopback)
RX packets 0  bytes 0 (0.0 B)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 0  bytes 0 (0.0 B)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qg-1deeaf96-e8: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
inet 172.24.4.227  netmask 255.255.255.240  broadcast 172.24.4.239
inet6 fe80::f816:3eff:fe93:12de  prefixlen 64  scopeid 0x20<link>
ether fa:16:3e:93:12:de  txqueuelen 0  (Ethernet)
RX packets 864432  bytes 1185656986 (1.1 GiB)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 382639  bytes 29347929 (27.9 MiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qr-1909e3bb-fd: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
inet 50.0.0.1  netmask 255.255.255.0  broadcast 50.0.0.255
inet6 fe80::f816:3eff:feae:d1e0  prefixlen 64  scopeid 0x20<link>
ether fa:16:3e:ae:d1:e0  txqueuelen 0  (Ethernet)
RX packets 382969  bytes 29386380 (28.0 MiB)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 864601  bytes 1185686714 (1.1 GiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qdhcp-153edd99-9152-49ad-a445-7280aa9df187 ip route
default via 50.0.0.1 dev tapfdf24cad-f8
50.0.0.0/24 dev tapfdf24cad-f8  proto kernel  scope link  src 50.0.0.10 

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qdhcp-153edd99-9152-49ad-a445-7280aa9df187 ifconfig
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
inet 127.0.0.1  netmask 255.0.0.0
inet6 ::1  prefixlen 128  scopeid 0x10<host>
loop  txqueuelen 0  (Local Loopback)
RX packets 0  bytes 0 (0.0 B)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 0  bytes 0 (0.0 B)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

tapfdf24cad-f8: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
inet 50.0.0.10  netmask 255.255.255.0  broadcast 50.0.0.255
inet6 fe80::f816:3eff:fe98:c66  prefixlen 64  scopeid 0x20<link>
ether fa:16:3e:98:0c:66  txqueuelen 0  (Ethernet)
RX packets 63  bytes 6445 (6.2 KiB)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 14  bytes 2508 (2.4 KiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-dd26c4ed-f757-416d-a772-64b503ffc497 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever

16: qr-1909e3bb-fd: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN
link/ether fa:16:3e:ae:d1:e0 brd ff:ff:ff:ff:ff:ff
inet 50.0.0.1/24 brd 50.0.0.255 scope global qr-1909e3bb-fd
valid_lft forever preferred_lft forever
inet6 fe80::f816:3eff:feae:d1e0/64 scope link
valid_lft forever preferred_lft forever

17: qg-1deeaf96-e8: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN
link/ether fa:16:3e:93:12:de brd ff:ff:ff:ff:ff:ff
inet 172.24.4.227/28 brd 172.24.4.239 scope global qg-1deeaf96-e8
valid_lft forever preferred_lft forever
inet 172.24.4.229/32 brd 172.24.4.229 scope global qg-1deeaf96-e8
valid_lft forever preferred_lft forever
inet 172.24.4.230/32 brd 172.24.4.230 scope global qg-1deeaf96-e8
valid_lft forever preferred_lft forever
inet6 fe80::f816:3eff:fe93:12de/64 scope link
valid_lft forever preferred_lft forever
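
The extra /32 addresses on qg-1deeaf96-e8 (172.24.4.229 and 172.24.4.230) are floating IPs that the L3 agent NATs to tenant instances. A hedged cross-check against Neutron's view and the namespace NAT rules:

[root@ip-192-169-142-147 ~(keystone_admin)]# neutron floatingip-list
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-dd26c4ed-f757-416d-a772-64b503ffc497 iptables -t nat -S | grep 172.24.4.229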