Sample of /etc/openstack-dashboard/local_settings

March 14, 2014

[root@dfw02 ~(keystone_admin)]$ cat  /etc/openstack-dashboard/local_settings | grep -v ^# | grep -v ^$
# Sample /etc/openstack-dashboard/local_settings (Django settings module for
# Horizon on RDO Havana / Fedora 20).
#
# NOTE(review): the original blog paste used typographic ("curly") quotes,
# which are invalid Python syntax; they are normalized to ASCII quotes below.
import os

from django.utils.translation import ugettext_lazy as _

from openstack_dashboard import exceptions

DEBUG = False
TEMPLATE_DEBUG = DEBUG

# Hosts/IPs allowed to reach the dashboard (Django security setting).
ALLOWED_HOSTS = ['192.168.1.127', 'localhost']

# Persist sessions in the database, fronted by the memcached cache (CACHES).
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"

# Session/local state database ("dash" schema on the controller's MySQL).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'dash',
        'USER': 'dash',
        'PASSWORD': 'fedora',
        'HOST': '192.168.1.127',
        'default-character-set': 'utf8'
    }
}

HORIZON_CONFIG = {
    'dashboards': ('project', 'admin', 'settings',),
    'default_dashboard': 'project',
    'user_home': 'openstack_dashboard.views.get_user_home',
    'ajax_queue_limit': 10,
    'auto_fade_alerts': {
        'delay': 3000,
        'fade_duration': 1500,
        'types': ['alert-success', 'alert-info']
    },
    'help_url': "http://docs.openstack.org",
    'exceptions': {'recoverable': exceptions.RECOVERABLE,
                   'not_found': exceptions.NOT_FOUND,
                   'unauthorized': exceptions.UNAUTHORIZED},
}

from horizon.utils import secret_key

# SECRET_KEY is generated once and cached on disk so all Apache workers share it.
LOCAL_PATH = '/var/lib/openstack-dashboard'
SECRET_KEY = secret_key.generate_or_read_from_file(
    os.path.join(LOCAL_PATH, '.secret_key_store'))

# Memcached backend; LOCATION must match /etc/sysconfig/memcached.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}

EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# Keystone (Identity) endpoint on the controller node.
OPENSTACK_HOST = "192.168.1.127"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member"

OPENSTACK_KEYSTONE_BACKEND = {
    'name': 'native',
    'can_edit_user': True,
    'can_edit_group': True,
    'can_edit_project': True,
    'can_edit_domain': True,
    'can_edit_role': True
}

OPENSTACK_HYPERVISOR_FEATURES = {
    'can_set_mount_point': False,
    # NOTE: as of Grizzly this is not yet supported in Nova so enabling this
    # setting will not do anything useful
    'can_encrypt_volumes': False
}

OPENSTACK_NEUTRON_NETWORK = {
    'enable_lb': False,
    'enable_firewall': False,
    'enable_quotas': True,
    'enable_vpn': False,
    # The profile_support option is used to detect if an external router can be
    # configured via the dashboard. When using specific plugins the
    # profile_support can be turned on if needed.
    'profile_support': None,
    #'profile_support': 'cisco',
}

API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
TIME_ZONE = "UTC"

POLICY_FILES_PATH = '/etc/openstack-dashboard'
POLICY_FILES = {
    'identity': 'keystone_policy.json',
    'compute': 'nova_policy.json'
}

LOGGING = {
    'version': 1,
    # When set to True this will disable all logging except
    # for loggers specified in this configuration dictionary. Note that
    # if nothing is specified here and disable_existing_loggers is True,
    # django.db.backends will still log unless it is disabled explicitly.
    'disable_existing_loggers': False,

    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'console': {
            # Set the level to "DEBUG" for verbose output logging.
            'level': 'INFO',
            'class': 'logging.StreamHandler',
        },
    },
    # NOTE(review): the closing brace above was missing in the original
    # paste, which accidentally nested 'loggers' inside 'handlers' and left
    # the LOGGING dict unbalanced.

    'loggers': {
        # Logging from django.db.backends is VERY verbose, send to null
        # by default.
        'django.db.backends': {
            'handlers': ['null'],
            'propagate': False,
        },
        'requests': {
            'handlers': ['null'],
            'propagate': False,
        },
        'horizon': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'openstack_dashboard': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'novaclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'cinderclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'keystoneclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'glanceclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'neutronclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'heatclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'ceilometerclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'troveclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'swiftclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'openstack_auth': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'nose.plugins.manager': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'django': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'iso8601': {
            'handlers': ['null'],
            'propagate': False,
        },
    }
}

# Default security-group rule templates offered by the dashboard UI.
# Port numbers are strings, matching the upstream sample file.
SECURITY_GROUP_RULES = {
    'all_tcp': {
        'name': 'ALL TCP',
        'ip_protocol': 'tcp',
        'from_port': '1',
        'to_port': '65535',
    },
    'all_udp': {
        'name': 'ALL UDP',
        'ip_protocol': 'udp',
        'from_port': '1',
        'to_port': '65535',
    },
    'all_icmp': {
        'name': 'ALL ICMP',
        'ip_protocol': 'icmp',
        'from_port': '-1',
        'to_port': '-1',
    },
    'ssh': {
        'name': 'SSH',
        'ip_protocol': 'tcp',
        'from_port': '22',
        'to_port': '22',
    },
    'smtp': {
        'name': 'SMTP',
        'ip_protocol': 'tcp',
        'from_port': '25',
        'to_port': '25',
    },
    'dns': {
        'name': 'DNS',
        'ip_protocol': 'tcp',
        'from_port': '53',
        'to_port': '53',
    },
    'http': {
        'name': 'HTTP',
        'ip_protocol': 'tcp',
        'from_port': '80',
        'to_port': '80',
    },
    'pop3': {
        'name': 'POP3',
        'ip_protocol': 'tcp',
        'from_port': '110',
        'to_port': '110',
    },
    'imap': {
        'name': 'IMAP',
        'ip_protocol': 'tcp',
        'from_port': '143',
        'to_port': '143',
    },
    'ldap': {
        'name': 'LDAP',
        'ip_protocol': 'tcp',
        'from_port': '389',
        'to_port': '389',
    },
    'https': {
        'name': 'HTTPS',
        'ip_protocol': 'tcp',
        'from_port': '443',
        'to_port': '443',
    },
    'smtps': {
        'name': 'SMTPS',
        'ip_protocol': 'tcp',
        'from_port': '465',
        'to_port': '465',
    },
    'imaps': {
        'name': 'IMAPS',
        'ip_protocol': 'tcp',
        'from_port': '993',
        'to_port': '993',
    },
    'pop3s': {
        'name': 'POP3S',
        'ip_protocol': 'tcp',
        'from_port': '995',
        'to_port': '995',
    },
    'ms_sql': {
        'name': 'MS SQL',
        'ip_protocol': 'tcp',
        'from_port': '1433',
        'to_port': '1433',
    },
    'mysql': {
        'name': 'MYSQL',
        'ip_protocol': 'tcp',
        'from_port': '3306',
        'to_port': '3306',
    },
    'rdp': {
        'name': 'RDP',
        'ip_protocol': 'tcp',
        'from_port': '3389',
        'to_port': '3389',
    },
}


Setup Dashboard&VNC console on Two Node Controller&Compute Neutron GRE+OVS+Gluster Fedora 20 Cluster

March 13, 2014

This post follows up Gluster 3.4.2 on Two Node Controller&Compute Neutron GRE+OVS Fedora 20 Cluster; in particular, it could be performed after Basic Setup to make system management more comfortable than relying on the CLI only.

It’s also easy to create instance via  Dashboard :

  Placing a customization script in the Post-Creation panel (the analog of --user-data)

#cloud-config
password: mysecret
chpasswd: { expire: False }
ssh_pwauth: True

To be able log in as “fedora” and set MTU=1457  inside VM (GRE tunneling)

   Key-pair submitted upon creation works like this :

[root@dfw02 Downloads(keystone_boris)]$ ssh -l fedora -i key2.pem  192.168.1.109
Last login: Sat Mar 15 07:47:45 2014

[fedora@vf20rs015 ~]$ uname -a
Linux vf20rs015.novalocal 3.13.6-200.fc20.x86_64 #1 SMP Fri Mar 7 17:02:28 UTC 2014 x86_64 x86_64 x86_64 GNU/Linux

[fedora@vf20rs015 ~]$ ifconfig
eth0: flags=4163  mtu 1457
inet 40.0.0.7  netmask 255.255.255.0  broadcast 40.0.0.255
inet6 fe80::f816:3eff:fe1e:1de6  prefixlen 64  scopeid 0x20
ether fa:16:3e:1e:1d:e6  txqueuelen 1000  (Ethernet)
RX packets 225  bytes 25426 (24.8 KiB)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 221  bytes 23674 (23.1 KiB)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73  mtu 65536
inet 127.0.0.1  netmask 255.0.0.0
inet6 ::1  prefixlen 128  scopeid 0x10
loop  txqueuelen 0  (Local Loopback)
RX packets 0  bytes 0 (0.0 B)
RX errors 0  dropped 0  overruns 0  frame 0
TX packets 0  bytes 0 (0.0 B)
TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

Setup described at link mentioned above was originally suggested by Kashyap Chamarthy  for VMs running on non-default Libvirt’s subnet . From my side came attempt to reproduce this setup on physical F20 boxes and arbitrary network , not connected to Libvirt, preventive updates for mysql.user table ,which allowed remote connections for  nova-compute and neutron-openvswitch-agent  from Compute to Controller,   changes to /etc/sysconfig/iptables to enable  Gluster 3.4.2 setup on F20  systems ( view http://bderzhavets.blogspot.com/2014/03/setup-gluster-342-on-two-node-neutron.html ) . I have also fixed typo in dhcp_agent.ini :- the reference for “dnsmasq.conf” and added to dnsmasq.conf line “dhcp-option=26,1454”. Updated configuration files are critical for launching instance without “Customization script” and allow to work with usual ssh keypair.  Actually , when updates are done instance gets created with MTU 1454. View  [2]. This setup is pretty much focused on ability to transfer neutron metadata from Controller to Compute F20 nodes and is done manually with no answer-files. It stops exactly at the point when `nova boot ..`  loads instance on Compute, which obtains internal IP via DHCP running on Controller and may be assigned floating IP to be able communicate with Internet. No attempts to setup dashboard has been done due to core target was neutron GRE+OVS functionality (just a proof of concept).

Setup

– Controller node: Nova, Keystone, Cinder, Glance, Neutron (using Open vSwitch plugin and GRE tunneling ), Dashboard

 – Compute node: Nova (nova-compute), Neutron (openvswitch-agent)

dwf02.localdomain   -  Controller (192.168.1.127) 
dwf01.localdomain   -  Compute   (192.168.1.137)

1. First step follows  http://docs.openstack.org/havana/install-guide/install/yum/content/install_dashboard.html   and  http://docs.openstack.org/havana/install-guide/install/yum/content/dashboard-session-database.html Sequence of actions per manuals above :-

# yum install memcached python-memcached mod_wsgi openstack-dashboard

Modify the value of CACHES[‘default’][‘LOCATION’] in /etc/openstack-dashboard/local_settings to match the ones set in /etc/sysconfig/memcached. Open /etc/openstack-dashboard/local_settings and look for this line:

CACHES = {
    'default': {
        'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION' : '127.0.0.1:11211'
    }
}

Update the ALLOWED_HOSTS in local_settings.py to include the addresses you wish to access the dashboard from. Edit /etc/openstack-dashboard/local_settings:

ALLOWED_HOSTS = ['Controller-IP', 'my-desktop']

This guide assumes that you are running the Dashboard on the controller node. You can easily run the dashboard on a separate server, by changing the appropriate settings in local_settings.py. Edit /etc/openstack-dashboard/local_settings and change OPENSTACK_HOST to the hostname of your Identity Service:

OPENSTACK_HOST = "Controller-IP"

Start the Apache web server and memcached: # service httpd restart

# systemctl start memcached

# systemctl enable memcached

To configure the MySQL database, create the dash database:

mysql> CREATE DATABASE dash; Create a MySQL user for the newly-created dash database that has full control of the database. Replace DASH_DBPASS with a password for the new user:

mysql> GRANT ALL ON dash.* TO 'dash'@'%' IDENTIFIED BY 'fedora';

mysql> GRANT ALL ON dash.* TO 'dash'@'localhost' IDENTIFIED BY 'fedora';

In the local_settings file /etc/openstack-dashboard/local_settings

SESSION_ENGINE = 'django.contrib.sessions.backends.db'

DATABASES = {
    'default': {
        # Database configuration here
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'dash',
        'USER': 'dash',
        'PASSWORD': 'fedora',
        'HOST': 'Controller-IP',
        'default-character-set': 'utf8'
    }
}

After configuring the local_settings as shown, you can run the manage.py syncdb command to populate this newly-created database.

# /usr/share/openstack-dashboard/manage.py syncdb

Attempting to run syncdb you  might get an error like ‘dash’@’yourhost’ is not authorized to do it with password ‘YES’.  Then ( for instance in my case)

# mysql -u root -p

MariaDB [(none)]> SELECT User, Host, Password FROM mysql.user;

MariaDB [(none)]>  insert into mysql.user(User,Host,Password) values ('dash','dallas1.localdomain',' ');

Query OK, 1 row affected, 4 warnings (0.00 sec)

MariaDB [(none)]> UPDATE mysql.user SET Password = PASSWORD('fedora')

> WHERE User = ‘dash’ ;

Query OK, 1 row affected (0.00 sec) Rows matched: 3  Changed: 1  Warnings: 0

MariaDB [(none)]>  SELECT User, Host, Password FROM mysql.user;

.   .  .  .

| dash     | %                   | *C9E492EC67084E4255B200FD34BDF396E3CE1A36 |

| dash     | localhost       | *C9E492EC67084E4255B200FD34BDF396E3CE1A36 |

| dash     | dallas1.localdomain | *C9E492EC67084E4255B200FD34BDF396E3CE1A36 | +———-+———————+——————————————-+

20 rows in set (0.00 sec)

That is exactly the same issue which comes up when starting openstack-nova-scheduler & openstcak-nova-conductor  services during basic installation of Controller on Fedora 20. View Basic setup in particular :-

Set table mysql.user in proper status

shell> mysql -u root -p
mysql> insert into mysql.user (User,Host,Password) values ('nova','dfw02.localdomain',' ');
mysql> UPDATE mysql.user SET Password = PASSWORD('nova')
    ->    WHERE User = 'nova';
mysql> FLUSH PRIVILEGES;

Start, enable nova-{api,scheduler,conductor} services

  $ for i in start enable status; \
    do systemctl $i openstack-nova-api; done

  $ for i in start enable status; \
    do systemctl $i openstack-nova-scheduler; done

  $ for i in start enable status; \
    do systemctl $i openstack-nova-conductor; done

 # service httpd restart

Finally on Controller (dfw02  – 192.168.1.127)  file /etc/openstack-dashboard/local_settings  looks like https://bderzhavets.wordpress.com/2014/03/14/sample-of-etcopenstack-dashboardlocal_settings/

At this point dashboard is functional, but instances sessions outputs are unavailable via dashboard.  I didn’t get any error code, just

Instance Detail: VF20RS03

OverviewLogConsole

Loading…

2. Second step skipped in mentioned manual , however known by experienced persons https://ask.openstack.org/en/question/520/vnc-console-in-dashboard-fails-to-connect-ot-server-code-1006/

**************************************

Controller  dfw02 – 192.168.1.127

**************************************

# ssh-keygen (Hit Enter to accept all of the defaults)

# ssh-copy-id -i ~/.ssh/id_rsa.pub root@dfw01

[root@dfw02 ~(keystone_boris)]$ ssh -L 5900:127.0.0.1:5900 -N -f -l root 192.168.1.137
[root@dfw02 ~(keystone_boris)]$ ssh -L 5901:127.0.0.1:5901 -N -f -l root 192.168.1.137
[root@dfw02 ~(keystone_boris)]$ ssh -L 5902:127.0.0.1:5902 -N -f -l root 192.168.1.137
[root@dfw02 ~(keystone_boris)]$ ssh -L 5903:127.0.0.1:5903 -N -f -l root 192.168.1.137
[root@dfw02 ~(keystone_boris)]$ ssh -L 5904:127.0.0.1:5904 -N -f -l root 192.168.1.137

Compute’s  IP is 192.168.1.137

Update /etc/nova/nova.conf:

novncproxy_host=0.0.0.0

novncproxy_port=6080

novncproxy_base_url=http://192.168.1.127:6080/vnc_auto.html

[root@dfw02 ~(keystone_admin)]$ systemctl enable openstack-nova-consoleauth.service
ln -s ‘/usr/lib/systemd/system/openstack-nova-consoleauth.service’ ‘/etc/systemd/system/multi-user.target.wants/openstack-nova-consoleauth.service’
[root@dfw02 ~(keystone_admin)]$ systemctl enable openstack-nova-novncproxy.service
ln -s ‘/usr/lib/systemd/system/openstack-nova-novncproxy.service’ ‘/etc/systemd/system/multi-user.target.wants/openstack-nova-novncproxy.service’

[root@dfw02 ~(keystone_admin)]$ systemctl start openstack-nova-consoleauth.service
[root@dfw02 ~(keystone_admin)]$ systemctl start openstack-nova-novncproxy.service

[root@dfw02 ~(keystone_admin)]$ systemctl status openstack-nova-consoleauth.service

openstack-nova-consoleauth.service – OpenStack Nova VNC console auth Server

Loaded: loaded (/usr/lib/systemd/system/openstack-nova-consoleauth.service; enabled)

Active: active (running) since Thu 2014-03-13 19:14:45 MSK; 20min ago

Main PID: 14679 (nova-consoleaut)

CGroup: /system.slice/openstack-nova-consoleauth.service

└─14679 /usr/bin/python /usr/bin/nova-consoleauth –logfile /var/log/nova/consoleauth.log

Mar 13 19:14:45 dfw02.localdomain systemd[1]: Started OpenStack Nova VNC console auth Server.

[root@dfw02 ~(keystone_admin)]$ systemctl status openstack-nova-novncproxy.service

openstack-nova-novncproxy.service – OpenStack Nova NoVNC Proxy Server

Loaded: loaded (/usr/lib/systemd/system/openstack-nova-novncproxy.service; enabled)

Active: active (running) since Thu 2014-03-13 19:14:58 MSK; 20min ago

Main PID: 14762 (nova-novncproxy)

CGroup: /system.slice/openstack-nova-novncproxy.service

├─14762 /usr/bin/python /usr/bin/nova-novncproxy –web /usr/share/novnc/

└─17166 /usr/bin/python /usr/bin/nova-novncproxy –web /usr/share/novnc/

Mar 13 19:23:54 dfw02.localdomain nova-novncproxy[14762]: 20: 127.0.0.1: Path: ‘/websockify’

Mar 13 19:23:54 dfw02.localdomain nova-novncproxy[14762]: 20: connecting to: 127.0.0.1:5900

Mar 13 19:23:55 dfw02.localdomain nova-novncproxy[14762]: 19: 127.0.0.1: ignoring empty handshake

Mar 13 19:24:31 dfw02.localdomain nova-novncproxy[14762]: 22: 127.0.0.1: ignoring socket not ready

Mar 13 19:24:32 dfw02.localdomain nova-novncproxy[14762]: 23: 127.0.0.1: Plain non-SSL (ws://) WebSocket connection

Mar 13 19:24:32 dfw02.localdomain nova-novncproxy[14762]: 23: 127.0.0.1: Version hybi-13, base64: ‘True’

Mar 13 19:24:32 dfw02.localdomain nova-novncproxy[14762]: 23: 127.0.0.1: Path: ‘/websockify’

Mar 13 19:24:32 dfw02.localdomain nova-novncproxy[14762]: 23: connecting to: 127.0.0.1:5901

Mar 13 19:24:37 dfw02.localdomain nova-novncproxy[14762]: 26: 127.0.0.1: ignoring empty handshake

Mar 13 19:24:37 dfw02.localdomain nova-novncproxy[14762]: 25: 127.0.0.1: ignoring empty handshake

Hint: Some lines were ellipsized, use -l to show in full.

[root@dfw02 ~(keystone_admin)]$ netstat -lntp | grep 6080

tcp        0      0 0.0.0.0:6080            0.0.0.0:*               LISTEN      14762/python

*********************************

Compute  dfw01 – 192.168.1.137

*********************************

Update  /etc/nova/nova.conf:

vnc_enabled=True

novncproxy_base_url=http://192.168.1.127:6080/vnc_auto.html

vncserver_listen=0.0.0.0

vncserver_proxyclient_address=192.168.1.137

# systemctl restart openstack-nova-compute

Finally :-

[root@dfw02 ~(keystone_admin)]$ systemctl list-units | grep nova

openstack-nova-api.service                      loaded active running   OpenStack Nova API Server
openstack-nova-conductor.service           loaded active running   OpenStack Nova Conductor Server
openstack-nova-consoleauth.service       loaded active running   OpenStack Nova VNC console auth Server
openstack-nova-novncproxy.service         loaded active running   OpenStack Nova NoVNC Proxy Server
openstack-nova-scheduler.service            loaded active running   OpenStack Nova Scheduler Server

[root@dfw02 ~(keystone_admin)]$ nova-manage service list

Binary           Host                                 Zone             Status     State Updated_At

nova-scheduler   dfw02.localdomain                    internal         enabled    🙂   2014-03-13 16:56:54

nova-conductor   dfw02.localdomain                    internal         enabled    🙂   2014-03-13 16:56:54

nova-compute     dfw01.localdomain                     nova             enabled    🙂   2014-03-13 16:56:45

nova-consoleauth dfw02.localdomain                   internal         enabled    🙂   2014-03-13 16:56:47

[root@dfw02 ~(keystone_admin)]$ neutron agent-list

+————————————–+——————–+——————-+——-+—————-+

| id                                   | agent_type         | host              | alive | admin_state_up |

+————————————–+——————–+——————-+——-+—————-+

| 037b985d-7a7d-455b-8536-76eed40b0722 | L3 agent           | dfw02.localdomain | 🙂   | True           |

| 22438ee9-b4ea-4316-9492-eb295288f61a | Open vSwitch agent | dfw02.localdomain | 🙂   | True           |

| 76ed02e2-978f-40d0-879e-1a2c6d1f7915 | DHCP agent         | dfw02.localdomain | 🙂   | True           |

| 951632a3-9744-4ff4-a835-c9f53957c617 | Open vSwitch agent | dfw01.localdomain | 🙂   | True           |

+————————————–+——————–+——————-+——-+—————-+

Users console views :-

    Admin Console views :-

[root@dallas2 ~]# service openstack-nova-compute status -l
Redirecting to /bin/systemctl status  -l openstack-nova-compute.service
openstack-nova-compute.service – OpenStack Nova Compute Server
Loaded: loaded (/usr/lib/systemd/system/openstack-nova-compute.service; enabled)
Active: active (running) since Thu 2014-03-20 16:29:07 MSK; 6h ago
Main PID: 1685 (nova-compute)
CGroup: /system.slice/openstack-nova-compute.service
├─1685 /usr/bin/python /usr/bin/nova-compute –logfile /var/log/nova/compute.log
└─3552 /usr/sbin/glusterfs –volfile-id=cinder-volumes012 –volfile-server=192.168.1.130 /var/lib/nova/mnt/1c9688348ab38662e3ac8fb121077d34

Mar 20 22:20:15 dallas2.localdomain sudo[11210]: nova : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/bin/nova-rootwrap /etc/nova/rootwrap.conf ip link set qvb372fd13e-d2 up
Mar 20 22:20:15 dallas2.localdomain sudo[11213]: nova : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/bin/nova-rootwrap /etc/nova/rootwrap.conf ip link set qvb372fd13e-d2 promisc on
Mar 20 22:20:16 dallas2.localdomain sudo[11216]: nova : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/bin/nova-rootwrap /etc/nova/rootwrap.conf ip link set qvo372fd13e-d2 up
Mar 20 22:20:16 dallas2.localdomain sudo[11219]: nova : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/bin/nova-rootwrap /etc/nova/rootwrap.conf ip link set qvo372fd13e-d2 promisc on
Mar 20 22:20:16 dallas2.localdomain sudo[11222]: nova : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/bin/nova-rootwrap /etc/nova/rootwrap.conf ip link set qbr372fd13e-d2 up
Mar 20 22:20:16 dallas2.localdomain sudo[11225]: nova : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/bin/nova-rootwrap /etc/nova/rootwrap.conf brctl addif qbr372fd13e-d2 qvb372fd13e-d2
Mar 20 22:20:16 dallas2.localdomain sudo[11228]: nova : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/bin/nova-rootwrap /etc/nova/rootwrap.conf ovs-vsctl — –may-exist add-port br-int qvo372fd13e-d2 — set Interface qvo372fd13e-d2 external-ids:iface-id=372fd13e-d283-43ba-9a4e-a1684660f4ce external-ids:iface-status=active external-ids:attached-mac=fa:16:3e:0d:4a:12 external-ids:vm-uuid=9679d849-7e4b-4cb5-b644-43279d53f01b
Mar 20 22:20:16 dallas2.localdomain ovs-vsctl[11230]: ovs|00001|vsctl|INFO|Called as /bin/ovs-vsctl — –may-exist add-port br-int qvo372fd13e-d2 — set Interface qvo372fd13e-d2 external-ids:iface-id=372fd13e-d283-43ba-9a4e-a1684660f4ce external-ids:iface-status=active external-ids:attached-mac=fa:16:3e:0d:4a:12 external-ids:vm-uuid=9679d849-7e4b-4cb5-b644-43279d53f01b
Mar 20 22:20:16 dallas2.localdomain sudo[11244]: nova : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/bin/nova-rootwrap /etc/nova/rootwrap.conf tee /sys/class/net/tap372fd13e-d2/brport/hairpin_mode
Mar 20 22:25:53 dallas2.localdomain nova-compute[1685]: 2014-03-20 22:25:53.102 1685 WARNING nova.compute.manager [-] Found 5 in the database and 2 on the hypervisor.

[root@dallas2 ~]# ovs-vsctl show
3e7422a7-8828-4e7c-b595-8a5b6504bc08
Bridge br-int
Port “qvod0e086e7-32”
tag: 1
Interface “qvod0e086e7-32”
Port br-int
            Interface br-int
type: internal
Port “qvo372fd13e-d2”
tag: 1
            Interface “qvo372fd13e-d2”
Port “qvob49ecf5e-8e”
tag: 1
Interface “qvob49ecf5e-8e”
Port “qvo756757a8-40”
tag: 1
Interface “qvo756757a8-40”
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port “qvo4d1f9115-03”
tag: 1
Interface “qvo4d1f9115-03”
Bridge br-tun
Port “gre-1”
Interface “gre-1″
type: gre
options: {in_key=flow, local_ip=”192.168.1.140″, out_key=flow, remote_ip=”192.168.1.130”}
Port br-tun
Interface br-tun
type: internal
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
ovs_version: “2.0.0”

[root@dallas1 ~(keystone_boris)]$ nova list
+————————————–+————–+———–+————+————-+—————————–+
| ID                                   | Name         | Status    | Task State | Power State | Networks                    |
+————————————–+————–+———–+————+————-+—————————–+
| 690d29ae-4c3c-4b2e-b2df-e4d654668336 | UbuntuSRS007 | SUSPENDED | None       | Shutdown    | int=10.0.0.6, 192.168.1.103 |
| 9c791573-1238-44c4-a103-6873fddc17d1 | UbuntuTS019  | SUSPENDED | None       | Shutdown    | int=10.0.0.5, 192.168.1.107 |
| 70db20be-efa6-4a96-bf39-6250962784a3 | VF20RS015    | SUSPENDED | None       | Shutdown    | int=10.0.0.7, 192.168.1.101 |
| 3c888e6a-dd4f-489a-82bb-1f1f9ce6a696 | VF20RS017    | ACTIVE    | None       | Running     | int=10.0.0.4, 192.168.1.102 |
| 9679d849-7e4b-4cb5-b644-43279d53f01b | VF20RS024    | ACTIVE    | None       | Running     | int=10.0.0.2, 192.168.1.105 |
+————————————–+————–+———–+————+————-+—————————–+
[root@dallas1 ~(keystone_boris)]$ nova show 9679d849-7e4b-4cb5-b644-43279d53f01b
+————————————–+———————————————————-+
| Property                             | Value                                                    |
+————————————–+———————————————————-+
| status                               | ACTIVE                                                   |
| updated                              | 2014-03-20T18:20:16Z                                     |
| OS-EXT-STS:task_state                | None                                                     |
| key_name                             | key2                                                     |
| image                                | Attempt to boot from volume – no image supplied          |
| int network                          | 10.0.0.2, 192.168.1.105                                  |
| hostId                               | 8477c225f2a46d84dcd609798bf5ee71cc8d20b44256b3b2a54b723f |
| OS-EXT-STS:vm_state                  | active                                                   |
| OS-SRV-USG:launched_at               | 2014-03-20T18:20:16.000000                               |
| flavor                               | m1.small (2)                                             |
| id                                   | 9679d849-7e4b-4cb5-b644-43279d53f01b                     |
| security_groups                      | [{u’name’: u’default’}]                                  |
| OS-SRV-USG:terminated_at             | None                                                     |
| user_id                              | df4a984ce2f24848a6b84aaa99e296f1                         |
| name                                 | VF20RS024                                                |
| created                              | 2014-03-20T18:20:10Z                                     |
| tenant_id                            | e896be65e94a4893b870bc29ba86d7eb                         |
| OS-DCF:diskConfig                    | MANUAL                                                   |
| metadata                             | {}                                                       |
| os-extended-volumes:volumes_attached | [{u’id’: u’abc0f5b8-5144-42b7-b49f-a42a20ddd88f‘}]       |
| accessIPv4                           |                                                          |
| accessIPv6                           |                                                          |
| progress                             | 0                                                        |
| OS-EXT-STS:power_state               | 1                                                        |
| OS-EXT-AZ:availability_zone          | nova                                                     |
| config_drive                         |                                                          |
+————————————–+———————————————————-+
[root@dallas1 ~(keystone_boris)]$ ls -l /FDR/Replicate
total 8383848
-rw-rw-rw-. 2 root root 5368709120 Mar 17 21:58 volume-4b807fe8-dcd2-46eb-b7dd-6ab10641c32a
-rw-rw-rw-. 2 root root 5368709120 Mar 20 18:26 volume-4df4fadf-1be9-4a09-b51c-723b8a6b9c23
-rw-rw-rw-. 2 root root 5368709120 Mar 19 13:46 volume-6ccc137a-6361-42ee-8925-57c6a2eeccf4
-rw-rw-rw-. 2 qemu qemu 5368709120 Mar 20 23:18 volume-abc0f5b8-5144-42b7-b49f-a42a20ddd88f
-rw-rw-rw-. 2 qemu qemu 5368709120 Mar 20 23:18 volume-ec9670b8-fa64-46e9-9695-641f51bf1421

[root@dallas1 ~(keystone_boris)]$ ssh 192.168.1.140
Last login: Thu Mar 20 20:15:49 2014
[root@dallas2 ~]# ls -l /FDR/Replicate
total 8383860
-rw-rw-rw-. 2 root root 5368709120 Mar 17 21:58 volume-4b807fe8-dcd2-46eb-b7dd-6ab10641c32a
-rw-rw-rw-. 2 root root 5368709120 Mar 20 18:26 volume-4df4fadf-1be9-4a09-b51c-723b8a6b9c23
-rw-rw-rw-. 2 root root 5368709120 Mar 19 13:46 volume-6ccc137a-6361-42ee-8925-57c6a2eeccf4
-rw-rw-rw-. 2 qemu qemu 5368709120 Mar 20 23:19 volume-abc0f5b8-5144-42b7-b49f-a42a20ddd88f
-rw-rw-rw-. 2 qemu qemu 5368709120 Mar 20 23:19 volume-ec9670b8-fa64-46e9-9695-641f51bf1421


Setup Gluster 3.4.2 on Two Node Controller&Compute Neutron GRE+OVS Fedora 20 Cluster

March 10, 2014

This post is an update for http://bderzhavets.blogspot.com/2014/01/setting-up-two-physical-node-openstack.html  . It’s focused on Gluster 3.4.2  implementation including tuning /etc/sysconfig/iptables files on Controller and Compute Nodes.
Copying ssh-key from master node to compute, step by step verification of gluster volume replica 2  functionality and switching RDO Havana cinder services to work with gluster volume created  to store instances bootable cinders volumes for performance improvement. Of course creating gluster bricks under “/”  is not recommended . It should be a separate mount point for “xfs” filesystem to store gluster bricks on each node.

 Manual RDO Havana setup itself was originally suggested by Kashyap Chamarthy  for F20 VMs running on non-default Libvirt’s subnet . From my side came attempt to reproduce this setup on physical F20 boxes and arbitrary network , not connected to Libvirt, preventive updates for mysql.user table ,which allowed remote connections for  nova-compute and neutron-openvswitch-agent  from Compute to Controller,   changes to /etc/sysconfig/iptables to enable  Gluster 3.4.2 setup on F20  systems ( view http://bderzhavets.blogspot.com/2014/03/setup-gluster-342-on-two-node-neutron.html ) . I have also fixed typo in dhcp_agent.ini :- the reference for “dnsmasq.conf” and added to dnsmasq.conf line “dhcp-option=26,1454”. Updated configuration files are critical for launching instance without “Customization script” and allow to work with usual ssh keypair.  Actually , when updates are done instance gets created with MTU 1454. View  [2]. Original  setup is pretty much focused on ability to transfer neutron metadata from Controller to Compute F20 nodes and is done manually with no answer-files. It stops exactly at the point when `nova boot ..`  loads instance on Compute, which obtains internal IP via DHCP running on Controller and may be assigned floating IP to be able communicate with Internet. No attempts to setup dashboard has been done due to core target was neutron GRE+OVS functionality (just a proof of concept). Regarding Dashboard Setup&VNC Console,  view   :-
Setup Dashboard&VNC console on Two Node Controller&Compute Neutron GRE+OVS+Gluster Fedora 20 Cluster

Updated setup procedure itself may be viewed here

Setup 

– Controller node: Nova, Keystone, Cinder, Glance, Neutron (using Open vSwitch plugin and GRE tunneling )

 – Compute node: Nova (nova-compute), Neutron (openvswitch-agent)

dallas1.localdomain   –  Controller (192.168.1.130)

dallas2.localdomain   –  Compute   (192.168.1.140)

First step is tuning /etc/sysconfig/iptables for IPv4 iptables firewall (service firewalld should be disabled) :-

Update /etc/sysconfig/iptables on both nodes:-

-A INPUT -p tcp -m multiport --dport 24007:24047 -j ACCEPT
-A INPUT -p tcp --dport 111 -j ACCEPT
-A INPUT -p udp --dport 111 -j ACCEPT
-A INPUT -p tcp -m multiport --dport 38465:38485 -j ACCEPT

Comment out lines bellow , ignoring instruction from http://kashyapc.fedorapeople.org/virt/openstack/neutron-configs-GRE-OVS-two-node.txt  . It’s critical for Gluster functionality. Having them active you are supposed to work with thin LVM as cinder volumes. You won’t be able even remote mount with “-t glusterfs” option. Gluster’s  replications will be dead for ever.

# -A FORWARD -j REJECT --reject-with icmp-host-prohibited
# -A INPUT -j REJECT --reject-with icmp-host-prohibited

Restart service iptables on both nodes

Second step:-

On dallas1, run the following commands :

# ssh-keygen (Hit Enter to accept all of the defaults)
# ssh-copy-id -i ~/.ssh/id_rsa.pub root@dallas2

On both nodes run :-

# yum  -y install glusterfs glusterfs-server glusterfs-fuse
# service glusterd start

On dallas1

#gluster peer probe dallas2.localdomain
Should return “success”

[root@dallas1 ~(keystone_admin)]$ gluster peer status

Number of Peers: 1
Hostname: dallas2.localdomain
Uuid: b3b1cf43-2fec-4904-82d4-b9be03f77c5f
State: Peer in Cluster (Connected)
On dallas2
[root@dallas2 ~]# gluster peer status
Number of Peers: 1
Hostname: 192.168.1.130
Uuid: a57433dd-4a1a-4442-a5ae-ba2f682e5c79
State: Peer in Cluster (Connected)

*************************************************************************************
On Controller (192.168.1.130)  & Compute nodes (192.168.1.140)
**********************************************************************************

Verify ports availability:-

[root@dallas1 ~(keystone_admin)]$ netstat -lntp | grep gluster
tcp    0      0 0.0.0.0:655        0.0.0.0:*    LISTEN      2591/glusterfs
tcp    0      0 0.0.0.0:49152      0.0.0.0:*    LISTEN      2524/glusterfsd
tcp    0      0 0.0.0.0:2049       0.0.0.0:*    LISTEN      2591/glusterfs
tcp    0      0 0.0.0.0:38465      0.0.0.0:*    LISTEN      2591/glusterfs
tcp    0      0 0.0.0.0:38466      0.0.0.0:*    LISTEN      2591/glusterfs
tcp    0      0 0.0.0.0:49155      0.0.0.0:*    LISTEN      2525/glusterfsd
tcp    0      0 0.0.0.0:38468      0.0.0.0:*    LISTEN      2591/glusterfs
tcp    0      0 0.0.0.0:38469      0.0.0.0:*    LISTEN      2591/glusterfs
tcp    0      0 0.0.0.0:24007      0.0.0.0:*    LISTEN      2380/glusterd

************************************

Switching Cinder to Gluster volume

************************************

# gluster volume create cinder-volumes021  replica 2 dallas1.localdomain:/FDR/Replicate   dallas2.localdomain:/FDR/Replicate force
# gluster volume start cinder-volumes021
# gluster volume set cinder-volumes021  auth.allow 192.168.1.*

[root@dallas1 ~(keystone_admin)]$ gluster volume info cinder-volumes012

Volume Name: cinder-volumes012
Type: Replicate
Volume ID: 9ee31c6c-0ae3-4fee-9886-b9cb6a518f48
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: dallas1.localdomain:/FDR/Replicate
Brick2: dallas2.localdomain:/FDR/Replicate
Options Reconfigured:
storage.owner-gid: 165
storage.owner-uid: 165
auth.allow: 192.168.1.*

[root@dallas1 ~(keystone_admin)]$ gluster volume status cinder-volumes012

Status of volume: cinder-volumes012
Gluster process                                                    Port    Online    Pid
——————————————————————————
Brick dallas1.localdomain:/FDR/Replicate         49155    Y    2525
Brick dallas2.localdomain:/FDR/Replicate         49152    Y    1615
NFS Server on localhost                                  2049    Y    2591
Self-heal Daemon on localhost                         N/A    Y    2596
NFS Server on dallas2.localdomain                   2049    Y    2202
Self-heal Daemon on dallas2.localdomain          N/A    Y    2197

# openstack-config --set /etc/cinder/cinder.conf DEFAULT volume_driver cinder.volume.drivers.glusterfs.GlusterfsDriver
# openstack-config --set /etc/cinder/cinder.conf DEFAULT glusterfs_shares_config /etc/cinder/shares.conf
# openstack-config --set /etc/cinder/cinder.conf DEFAULT glusterfs_mount_point_base /var/lib/cinder/volumes
# vi /etc/cinder/shares.conf
192.168.1.130:cinder-volumes021
:wq

Make sure all thin LVM volumes have been deleted (check via `cinder list`); if not, delete them all.

[root@dallas1 ~(keystone_admin)]$ for i in api scheduler volume ; do service openstack-cinder-${i} restart ; done

It should add row to `df -h` output :

192.168.1.130:cinder-volumes012  187G   32G  146G  18% /var/lib/cinder/volumes/1c9688348ab38662e3ac8fb121077d34

[root@dallas1 ~(keystone_admin)]$ openstack-status

== Nova services ==

openstack-nova-api:                        active
openstack-nova-cert:                       inactive  (disabled on boot)
openstack-nova-compute:               inactive  (disabled on boot)
openstack-nova-network:                inactive  (disabled on boot)
openstack-nova-scheduler:             active
openstack-nova-volume:                  inactive  (disabled on boot)
openstack-nova-conductor:             active
== Glance services ==
openstack-glance-api:                   active
openstack-glance-registry:           active
== Keystone service ==
openstack-keystone:                     active
== neutron services ==
neutron-server:                         active
neutron-dhcp-agent:                active
neutron-l3-agent:                     active
neutron-metadata-agent:        active
neutron-lbaas-agent:                    inactive  (disabled on boot)
neutron-openvswitch-agent:       active
neutron-linuxbridge-agent:         inactive  (disabled on boot)
neutron-ryu-agent:                      inactive  (disabled on boot)
neutron-nec-agent:                      inactive  (disabled on boot)
neutron-mlnx-agent:                   inactive  (disabled on boot)
== Cinder services ==
openstack-cinder-api:                   active
openstack-cinder-scheduler:        active
openstack-cinder-volume:             active
== Support services ==
mysqld:                                 inactive  (disabled on boot)
libvirtd:                               active
openvswitch:                            active
dbus:                                   active
tgtd:                                   active
qpidd:                                  active
== Keystone users ==
+———————————-+———+———+——-+
|                id                |   name  | enabled | email |
+———————————-+———+———+——-+
| 871cf99617ff40e09039185aa7ab11f8 |  admin  |   True  |       |
| df4a984ce2f24848a6b84aaa99e296f1 |  boris  |   True  |       |
| 57fc5466230b497a9f206a20618dbe25 |  cinder |   True  |       |
| cdb2e5af7bae4c5486a1e3e2f42727f0 |  glance |   True  |       |
| adb14139a0874c74b14d61d2d4f22371 | neutron |   True  |       |
| 2485122e3538409c8a6fa2ea4343cedf |   nova  |   True  |       |
+———————————-+———+———+——-+
== Glance images ==
+————————————–+———————+————-+——————+———–+——–+
| ID                                   | Name                | Disk Format | Container Format | Size      | Status |
+————————————–+———————+————-+——————+———–+——–+
| 592faef8-308a-4438-867a-17adf685cde4 | CirrOS 31           | qcow2       | bare             | 13147648  | active |
| d0e90250-5814-4685-9b8d-65ec9daa7117 | Fedora 20 x86_64    | qcow2       | bare             | 214106112 | active |
| 3e6eea8e-32e6-4373-9eb1-e04b8a3167f9 | Ubuntu Server 13.10 | qcow2       | bare             | 244777472 | active |
+————————————–+———————+————-+——————+———–+——–+
== Nova managed services ==
+—————-+———————+———-+———+——-+—————————-+—————–+
| Binary         | Host                | Zone     | Status  | State | Updated_at                 | Disabled Reason |
+—————-+———————+———-+———+——-+—————————-+—————–+
| nova-scheduler | dallas1.localdomain | internal | enabled | up    | 2014-03-09T14:19:31.000000 | None            |
| nova-conductor | dallas1.localdomain | internal | enabled | up    | 2014-03-09T14:19:30.000000 | None            |
| nova-compute   | dallas2.localdomain | nova     | enabled | up    | 2014-03-09T14:19:33.000000 | None            |
+—————-+———————+———-+———+——-+—————————-+—————–+
== Nova networks ==
+————————————–+——-+——+
| ID                                   | Label | Cidr |
+————————————–+——-+——+
| 0ed406bf-3552-4036-9006-440f3e69618e | ext   | None |
| 166d9651-d299-47df-a5a1-b368e87b612f | int   | None |
+————————————–+——-+——+
== Nova instance flavors ==
+—-+———–+———–+——+———–+——+——-+————-+———–+
| ID | Name      | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+—-+———–+———–+——+———–+——+——-+————-+———–+
| 1  | m1.tiny   | 512       | 1    | 0         |      | 1     | 1.0         | True      |
| 2  | m1.small  | 2048      | 20   | 0         |      | 1     | 1.0         | True      |
| 3  | m1.medium | 4096      | 40   | 0         |      | 2     | 1.0         | True      |
| 4  | m1.large  | 8192      | 80   | 0         |      | 4     | 1.0         | True      |
| 5  | m1.xlarge | 16384     | 160  | 0         |      | 8     | 1.0         | True      |
+—-+———–+———–+——+———–+——+——-+————-+———–+
== Nova instances ==
+—-+——+——–+————+————-+———-+
| ID | Name | Status | Task State | Power State | Networks |
+—-+——+——–+————+————-+———-+
+—-+——+——–+————+————-+———-+

[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+———–+——–+————+————-+—————————–+
| ID                                   | Name      | Status | Task State | Power State | Networks                    |
+————————————–+———–+——–+————+————-+—————————–+
| 9566adec-9406-4c3e-bce5-109ecb8bcf6b | VF20RS007 | ACTIVE | None       | Running     | int=10.0.0.2, 192.168.1.101 |
| 8142ee4c-ef56-4b61-8a0b-ecd82d21484f | VF20RS012 | ACTIVE | None       | Running     | int=10.0.0.4, 192.168.1.102 |
+————————————–+———–+——–+————+————-+—————————–+
[root@dallas1 ~(keystone_boris)]$ df -h

Filesystem                       Size  Used Avail Use% Mounted on
/dev/mapper/fedora01-root        187G   32G  146G  18% /
devtmpfs                         3.9G     0  3.9G   0% /dev
tmpfs                            3.9G  184K  3.9G   1% /dev/shm
tmpfs                            3.9G  9.1M  3.9G   1% /run
tmpfs                            3.9G     0  3.9G   0% /sys/fs/cgroup
tmpfs                            3.9G  464K  3.9G   1% /tmp
/dev/sdb5                        477M  122M  327M  28% /boot
tmpfs                            3.9G  9.1M  3.9G   1% /run/netns
192.168.1.130:cinder-volumes012  187G   32G  146G  18% /var/lib/cinder/volumes/1c9688348ab38662e3ac8fb121077d34

(neutron) agent-list

+————————————–+——————–+———————+——-+—————-+
| id                                   | agent_type         | host                | alive | admin_state_up |
+————————————–+——————–+———————+——-+—————-+
| 3ed1cd15-81af-4252-9d6f-e9bb140bf6cf | L3 agent           | dallas1.localdomain | 🙂   | True           |
| a088a6df-633c-4959-a316-510c99f3876b | DHCP agent         | dallas1.localdomain | 🙂   | True           |
| a3e5200c-b391-4930-b3ee-58c8d1b13c73 | Open vSwitch agent | dallas1.localdomain | 🙂   | True           |
| b6da839a-0d93-44ad-9793-6d0919fbb547 | Open vSwitch agent | dallas2.localdomain | 🙂   | True           |
+————————————–+——————–+———————+——-+—————-+
If Controller has been correctly set up:-

[root@dallas1 ~(keystone_admin)]$ netstat -lntp | grep python
tcp    0     0 0.0.0.0:8700      0.0.0.0:*     LISTEN      1160/python
tcp    0     0 0.0.0.0:35357     0.0.0.0:*     LISTEN      1163/python
tcp   0      0 0.0.0.0:9696      0.0.0.0:*      LISTEN      1165/python
tcp   0      0 0.0.0.0:8773      0.0.0.0:*      LISTEN      1160/python
tcp   0      0 0.0.0.0:8774      0.0.0.0:*      LISTEN      1160/python
tcp   0      0 0.0.0.0:9191      0.0.0.0:*      LISTEN      1173/python
tcp   0      0 0.0.0.0:8776      0.0.0.0:*      LISTEN      8169/python
tcp   0      0 0.0.0.0:5000      0.0.0.0:*      LISTEN      1163/python
tcp   0      0 0.0.0.0:9292      0.0.0.0:*      LISTEN      1168/python 

**********************************************
Creating instance utilizing glusterfs volume
**********************************************

[root@dallas1 ~(keystone_boris)]$ glance image-list

+————————————–+———————+————-+——————+———–+——–+
| ID                                   | Name                | Disk Format | Container Format | Size      | Status |
+————————————–+———————+————-+——————+———–+——–+
| 592faef8-308a-4438-867a-17adf685cde4 | CirrOS 31           | qcow2       | bare             | 13147648  | active |
| d0e90250-5814-4685-9b8d-65ec9daa7117 | Fedora 20 x86_64    | qcow2       | bare             | 214106112 | active |
| 3e6eea8e-32e6-4373-9eb1-e04b8a3167f9 | Ubuntu Server 13.10 | qcow2       | bare             | 244777472 | active |
+————————————–+———————+————-+——————+———–+——–+

I have to note that the schema with `cinder create --image-id .. --display_name VOL_NAME SIZE` & `nova boot --flavor 2 --user-data=./myfile.txt --block_device_mapping vda=volume_id:::0 VM_NAME` does not currently work reliably for me.

As of 03/11 the standard schema via `cinder create --image-id IMAGE_ID --display_name VOL_NAME SIZE` & `nova boot --flavor 2 --user-data=./myfile.txt --block_device_mapping vda=VOLUME_ID:::0 INSTANCE_NAME` started to work fine. However, the schema described below, on the contrary, stopped working on glusterfs-based cinder volumes.

[root@dallas1 ~(keystone_boris)]$ nova boot --flavor 2 --user-data=./myfile.txt --block-device source=image,id=d0e90250-5814-4685-9b8d-65ec9daa7117,dest=volume,size=5,shutdown=preserve,bootindex=0 VF20RS012

+————————————–+————————————————-+
| Property                             | Value                                           |
+————————————–+————————————————-+
| status                               | BUILD                                           |
| updated                              | 2014-03-09T12:41:22Z                            |
| OS-EXT-STS:task_state                | scheduling                                      |
| key_name                             | None                                            |
| image                                | Attempt to boot from volume – no image supplied |
| hostId                               |                                                 |
| OS-EXT-STS:vm_state                  | building                                        |
| OS-SRV-USG:launched_at               | None                                            |
| flavor                               | m1.small                                        |
| id                                   | 8142ee4c-ef56-4b61-8a0b-ecd82d21484f            |
| security_groups                      | [{u’name’: u’default’}]                         |
| OS-SRV-USG:terminated_at             | None                                            |
| user_id                              | df4a984ce2f24848a6b84aaa99e296f1                |
| name                                 | VF20RS012                                       |
| adminPass                            | eFDhC8ZSCFU2                                    |
| tenant_id                            | e896be65e94a4893b870bc29ba86d7eb                |
| created                              | 2014-03-09T12:41:22Z                            |
| OS-DCF:diskConfig                    | MANUAL                                          |
| metadata                             | {}                                              |
| os-extended-volumes:volumes_attached | []                                              |
| accessIPv4                           |                                                 |
| accessIPv6                           |                                                 |
| progress                             | 0                                               |
| OS-EXT-STS:power_state               | 0                                               |
| OS-EXT-AZ:availability_zone          | nova                                            |
| config_drive                         |                                                 |
+————————————–+————————————————-+

[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+———–+———–+———————-+————-+—————————–+
| ID                                   | Name      | Status    | Task State           | Power State | Networks                    |
+————————————–+———–+———–+———————-+————-+—————————–+
| 9566adec-9406-4c3e-bce5-109ecb8bcf6b | VF20RS007 | SUSPENDED | None                 | Shutdown    | int=10.0.0.2, 192.168.1.101 |
| 8142ee4c-ef56-4b61-8a0b-ecd82d21484f | VF20RS012 | BUILD     | block_device_mapping | NOSTATE     |                             |
+————————————–+———–+———–+———————-+————-+—————————–+
WAIT …
[root@dallas1 ~(keystone_boris)]$ nova list
+————————————–+———–+———–+————+————-+—————————–+
| ID                                   | Name      | Status    | Task State | Power State | Networks                    |
+————————————–+———–+———–+————+————-+—————————–+
| 9566adec-9406-4c3e-bce5-109ecb8bcf6b | VF20RS007 | SUSPENDED | None       | Shutdown    | int=10.0.0.2, 192.168.1.101 |
| 8142ee4c-ef56-4b61-8a0b-ecd82d21484f | VF20RS012 | ACTIVE    | None       | Running     | int=10.0.0.4                |
+————————————–+———–+———–+————+————-+—————————–+
[root@dallas1 ~(keystone_boris)]$ neutron floatingip-create ext

Created a new floatingip:

+———————+————————————–+
| Field               | Value                                |
+———————+————————————–+
| fixed_ip_address    |                                      |
| floating_ip_address | 192.168.1.102                        |
| floating_network_id | 0ed406bf-3552-4036-9006-440f3e69618e |
| id                  | 5c74667d-9b22-4092-ae0a-70ff3a06e785 |
| port_id             |                                      |
| router_id           |                                      |
| tenant_id           | e896be65e94a4893b870bc29ba86d7eb     |
+———————+————————————–+

[root@dallas1 ~(keystone_boris)]$ neutron port-list –device-id 8142ee4c-ef56-4b61-8a0b-ecd82d21484f

+————————————–+——+——————-+———————————————————————————+
| id                                   | name | mac_address       | fixed_ips                                                                       |
+————————————–+——+——————-+———————————————————————————+
| dc60b5f4-739e-49bd-a004-3ef806e2b488 |      | fa:16:3e:70:56:cc | {“subnet_id”: “2e838119-3e2e-46e8-b7cc-6d00975046f2”, “ip_address”: “10.0.0.4”} |
+————————————–+——+——————-+———————————————————————————+

[root@dallas1 ~(keystone_boris)]$ neutron floatingip-associate 5c74667d-9b22-4092-ae0a-70ff3a06e785 dc60b5f4-739e-49bd-a004-3ef806e2b488

Associated floatingip 5c74667d-9b22-4092-ae0a-70ff3a06e785

[root@dallas1 ~(keystone_boris)]$ ping 192.168.1.102

PING 192.168.1.102 (192.168.1.102) 56(84) bytes of data.
64 bytes from 192.168.1.102: icmp_seq=1 ttl=63 time=6.23 ms
64 bytes from 192.168.1.102: icmp_seq=2 ttl=63 time=0.702 ms
64 bytes from 192.168.1.102: icmp_seq=3 ttl=63 time=1.07 ms
64 bytes from 192.168.1.102: icmp_seq=4 ttl=63 time=0.693 ms
64 bytes from 192.168.1.102: icmp_seq=5 ttl=63 time=1.80 ms
64 bytes from 192.168.1.102: icmp_seq=6 ttl=63 time=0.750 ms
^C

[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+———–+——–+————+————-+—————————–+
| ID                                   | Name      | Status | Task State | Power State | Networks                    |
+————————————–+———–+——–+————+————-+—————————–+
| 9566adec-9406-4c3e-bce5-109ecb8bcf6b | VF20RS007 | ACTIVE | None       | Running     | int=10.0.0.2, 192.168.1.101 |
| 8142ee4c-ef56-4b61-8a0b-ecd82d21484f | VF20RS012 | ACTIVE | None       | Running     | int=10.0.0.4, 192.168.1.102 |
+————————————–+———–+——–+————+————-+—————————–+

[root@dallas1 ~(keystone_boris)]$ cinder list

+————————————–+——–+————–+——+————-+———-+————————————–+
|                  ID                  | Status | Display Name | Size | Volume Type | Bootable |             Attached to              |
+————————————–+——–+————–+——+————-+———-+————————————–+

| 575be853-b104-458e-bc72-1785ef524416 | in-use |              |  5   |     None    |   true   | 8142ee4c-ef56-4b61-8a0b-ecd82d21484f |
| 9794bd45-8923-4f3e-a48f-fa1d62a964f8  | in-use |              |  5   |     None    |   true   | 9566adec-9406-4c3e-bce5-109ecb8bcf6b |
+————————————–+——–+————–+——+————-+———-+——————————

On Compute:-

[root@dallas1 ~]# ssh 192.168.1.140

Last login: Sun Mar  9 16:46:40 2014

[root@dallas2 ~]# df -h

Filesystem                       Size  Used Avail Use% Mounted on
/dev/mapper/fedora01-root        187G   18G  160G  11% /
devtmpfs                         3.9G     0  3.9G   0% /dev
tmpfs                            3.9G  3.1M  3.9G   1% /dev/shm
tmpfs                            3.9G  9.4M  3.9G   1% /run
tmpfs                            3.9G     0  3.9G   0% /sys/fs/cgroup
tmpfs                            3.9G  115M  3.8G   3% /tmp
/dev/sdb5                        477M  122M  327M  28% /boot
192.168.1.130:cinder-volumes012  187G   32G  146G  18% /var/lib/nova/mnt/1c9688348ab38662e3ac8fb121077d34

[root@dallas2 ~]# ps -ef| grep nova

nova      1548     1  0 16:29 ?        00:00:42 /usr/bin/python /usr/bin/nova-compute –logfile /var/log/nova/compute.log

root      3005     1  0 16:34 ?        00:00:38 /usr/sbin/glusterfs –volfile-id=cinder-volumes012 –volfile-server=192.168.1.130 /var/lib/nova/mnt/1c9688348ab38662e3ac8fb121077d34

qemu      4762     1 58 16:42 ?        00:52:17 /usr/bin/qemu-system-x86_64 -name instance-00000061 -S -machine pc-i440fx-1.6,accel=tcg,usb=off -cpu Penryn,+osxsave,+xsave,+pdcm,+xtpr,+tm2,+est,+smx,+vmx,+ds_cpl,+monitor,+dtes64,+pbe,+tm,+ht,+ss,+acpi,+ds,+vme -m 2048 -realtime mlock=off -smp 1,sockets=1,cores=1,threads=1 -uuid 8142ee4c-ef56-4b61-8a0b-ecd82d21484f -smbios type=1,manufacturer=Fedora Project,product=OpenStack Nova,version=2013.2.2-1.fc20,serial=6050001e-8c00-00ac-818a-90e6ba2d11eb,uuid=8142ee4c-ef56-4b61-8a0b-ecd82d21484f -no-user-config -nodefaults -chardev socket,id=charmonitor,path=/var/lib/libvirt/qemu/instance-00000061.monitor,server,nowait -mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc -no-shutdown -device piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 -drive file=/var/lib/nova/mnt/1c9688348ab38662e3ac8fb121077d34/volume-575be853-b104-458e-bc72-1785ef524416,if=none,id=drive-virtio-disk0,format=raw,serial=575be853-b104-458e-bc72-1785ef524416,cache=none -device virtio-blk-pci,scsi=off,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1 -netdev tap,fd=24,id=hostnet0 -device virtio-net-pci,netdev=hostnet0,id=net0,mac=fa:16:3e:70:56:cc,bus=pci.0,addr=0x3 -chardev file,id=charserial0,path=/var/lib/nova/instances/8142ee4c-ef56-4b61-8a0b-ecd82d21484f/console.log -device isa-serial,chardev=charserial0,id=serial0 -chardev pty,id=charserial1 -device isa-serial,chardev=charserial1,id=serial1 -device usb-tablet,id=input0 -vnc 127.0.0.1:0 -k en-us -device cirrus-vga,id=video0,bus=pci.0,addr=0x2 -device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x5

qemu      6330     1 44 16:49 ?        00:36:02 /usr/bin/qemu-system-x86_64 -name instance-0000005f -S -machine pc-i440fx-1.6,accel=tcg,usb=off -cpu Penryn,+osxsave,+xsave,+pdcm,+xtpr,+tm2,+est,+smx,+vmx,+ds_cpl,+monitor,+dtes64,+pbe,+tm,+ht,+ss,+acpi,+ds,+vme -m 2048 -realtime mlock=off -smp 1,sockets=1,cores=1,threads=1 -uuid 9566adec-9406-4c3e-bce5-109ecb8bcf6b -smbios type=1,manufacturer=Fedora Project,product=OpenStack Nova,version=2013.2.2-1.fc20,serial=6050001e-8c00-00ac-818a-90e6ba2d11eb,uuid=9566adec-9406-4c3e-bce5-109ecb8bcf6b -no-user-config -nodefaults -chardev socket,id=charmonitor,path=/var/lib/libvirt/qemu/instance-0000005f.monitor,server,nowait -mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc -no-shutdown -device piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 -drive file=/var/lib/nova/mnt/1c9688348ab38662e3ac8fb121077d34/volume-9794bd45-8923-4f3e-a48f-fa1d62a964f8,if=none,id=drive-virtio-disk0,format=raw,serial=9794bd45-8923-4f3e-a48f-fa1d62a964f8,cache=none -device virtio-blk-pci,scsi=off,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1 -netdev tap,fd=27,id=hostnet0 -device virtio-net-pci,netdev=hostnet0,id=net0,mac=fa:16:3e:50:84:72,bus=pci.0,addr=0x3 -chardev file,id=charserial0,path=/var/lib/nova/instances/9566adec-9406-4c3e-bce5-109ecb8bcf6b/console.log -device isa-serial,chardev=charserial0,id=serial0 -chardev pty,id=charserial1 -device isa-serial,chardev=charserial1,id=serial1 -device usb-tablet,id=input0 -vnc 127.0.0.1:1 -k en-us -device cirrus-vga,id=video0,bus=pci.0,addr=0x2 -incoming fd:24 -device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x5

root     24713 24622  0 18:11 pts/4    00:00:00 grep –color=auto nova

[root@dallas2 ~]# ps -ef| grep neutron

neutron   1549     1  0 16:29 ?        00:00:53 /usr/bin/python /usr/bin/neutron-openvswitch-agent –config-file /usr/share/neutron/neutron-dist.conf –config-file /etc/neutron/neutron.conf –config-file /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini –log-file /var/log/neutron/openvswitch-agent.log

root     24981 24622  0 18:12 pts/4    00:00:00 grep –color=auto neutron

  Top at Compute node (192.168.1.140)

      Runtime at Compute node ( dallas2 192.168.1.140)

 ******************************************************

Building Ubuntu 14.04 instance via cinder volume

******************************************************

[root@dallas1 ~(keystone_boris)]$ glance image-list
+————————————–+———————+————-+——————+———–+——–+
| ID                                   | Name                | Disk Format | Container Format | Size      | Status |
+————————————–+———————+————-+——————+———–+——–+
| 592faef8-308a-4438-867a-17adf685cde4 | CirrOS 31           | qcow2       | bare             | 13147648  | active |
| d0e90250-5814-4685-9b8d-65ec9daa7117 | Fedora 20 x86_64    | qcow2       | bare             | 214106112 | active |
| c2b7c3ed-e25d-44c4-a5e7-4e013c4a8b00 | Ubuntu 14.04        | qcow2       | bare             | 264176128 | active |
| 3e6eea8e-32e6-4373-9eb1-e04b8a3167f9 | Ubuntu Server 13.10 | qcow2       | bare             | 244777472 | active |
+————————————–+———————+————-+——————+———–+——–+
[root@dallas1 ~(keystone_boris)]$ cinder create --image-id c2b7c3ed-e25d-44c4-a5e7-4e013c4a8b00 --display_name UbuntuTrusty 5
+———————+————————————–+
|       Property      |                Value                 |
+———————+————————————–+
|     attachments     |                  []                  |
|  availability_zone  |                 nova                 |
|       bootable      |                false                 |
|      created_at     |      2014-03-10T06:35:39.873978      |
| display_description |                 None                 |
|     display_name    |             UbuntuTrusty             |
|          id         | 8bcc02a7-b9ba-4cd6-a6b9-0574889bf8d2 |
|       image_id      | c2b7c3ed-e25d-44c4-a5e7-4e013c4a8b00 |
|       metadata      |                  {}                  |
|         size        |                  5                   |
|     snapshot_id     |                 None                 |
|     source_volid    |                 None                 |
|        status       |               creating               |
|     volume_type     |                 None                 |
+———————+————————————–+

[root@dallas1 ~(keystone_boris)]$ cinder list

+————————————–+———–+————–+——+————-+———-+————————————–+
|                  ID                  |   Status  | Display Name | Size | Volume Type | Bootable |             Attached to              |
+————————————–+———–+————–+——+————-+———-+————————————–+
| 56ceaaa8-c0ec-45f3-98a4-555c1231b34e |   in-use  |              |  5   |     None    |   true   | e29606c5-582f-4766-ae1b-52043a698743 |
| 575be853-b104-458e-bc72-1785ef524416 |   in-use  |              |  5   |     None    |   true   | 8142ee4c-ef56-4b61-8a0b-ecd82d21484f |
| 8bcc02a7-b9ba-4cd6-a6b9-0574889bf8d2 | available | UbuntuTrusty |  5   |     None    |   true   |                                      |
| 9794bd45-8923-4f3e-a48f-fa1d62a964f8 |   in-use  |              |  5   |     None    |   true   | 9566adec-9406-4c3e-bce5-109ecb8bcf6b |
+————————————–+———–+————–+——+————-+———-+————————————–+

[root@dallas1 ~(keystone_boris)]$  nova boot --flavor 2 --user-data=./myfile.txt --block_device_mapping vda=8bcc02a7-b9ba-4cd6-a6b9-0574889bf8d2:::0 UbuntuTR01

+————————————–+—————————————————-+
| Property                             | Value                                              |
+————————————–+—————————————————-+

| status                               | BUILD                                              |
| updated                              | 2014-03-10T06:40:14Z                               |
| OS-EXT-STS:task_state                | scheduling                                         |
| key_name                             | None                                               |
| image                                | Attempt to boot from volume – no image supplied    |
| hostId                               |                                                    |
| OS-EXT-STS:vm_state                  | building                                           |
| OS-SRV-USG:launched_at               | None                                               |
| flavor                               | m1.small                                           |
| id                                   | 0859e52d-c07b-4f56-ac79-2b37080d2843               |
| security_groups                      | [{u’name’: u’default’}]                            |
| OS-SRV-USG:terminated_at             | None                                               |
| user_id                              | df4a984ce2f24848a6b84aaa99e296f1                   |
| name                                 | UbuntuTR01                                         |
| adminPass                            | L8VuhttJMbJf                                       |
| tenant_id                            | e896be65e94a4893b870bc29ba86d7eb                   |
| created                              | 2014-03-10T06:40:13Z                               |
| OS-DCF:diskConfig                    | MANUAL                                             |
| metadata                             | {}                                                 |
| os-extended-volumes:volumes_attached | [{u’id’: u’8bcc02a7-b9ba-4cd6-a6b9-0574889bf8d2′}] |
| accessIPv4                           |                                                    |
| accessIPv6                           |                                                    |
| progress                             | 0                                                  |
| OS-EXT-STS:power_state               | 0                                                  |
| OS-EXT-AZ:availability_zone          | nova                                               |
| config_drive                         |                                                    |
+————————————–+—————————————————-+

[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+————+———–+————+————-+—————————–+
| ID                                   | Name       | Status    | Task State | Power State | Networks                    |
+————————————–+————+———–+————+————-+—————————–+
| 0859e52d-c07b-4f56-ac79-2b37080d2843 | UbuntuTR01 | ACTIVE    | None       | Running     | int=10.0.0.6                |
| 9566adec-9406-4c3e-bce5-109ecb8bcf6b | VF20RS007  | SUSPENDED | None       | Shutdown    | int=10.0.0.2, 192.168.1.101 |
| 8142ee4c-ef56-4b61-8a0b-ecd82d21484f | VF20RS012  | SUSPENDED | None       | Shutdown    | int=10.0.0.4, 192.168.1.102 |
| e29606c5-582f-4766-ae1b-52043a698743 | VF20RS016  | ACTIVE    | None       | Running     | int=10.0.0.5, 192.168.1.103 |
+————————————–+————+———–+————+————-+—————————–+

[root@dallas1 ~(keystone_boris)]$ neutron floatingip-create ext

Created a new floatingip:

+———————+————————————–+
| Field               | Value                                |
+———————+————————————–+
| fixed_ip_address    |                                      |
| floating_ip_address | 192.168.1.104                        |
| floating_network_id | 0ed406bf-3552-4036-9006-440f3e69618e |
| id                  | 9498ac85-82b0-468a-b526-64a659080ab9 |
| port_id             |                                      |
| router_id           |                                      |
| tenant_id           | e896be65e94a4893b870bc29ba86d7eb     |
+———————+————————————–+

[root@dallas1 ~(keystone_boris)]$ neutron port-list –device-id 0859e52d-c07b-4f56-ac79-2b37080d2843

+————————————–+——+——————-+———————————————————————————+
| id                                   | name | mac_address       | fixed_ips                                                                       |
+————————————–+——+——————-+———————————————————————————+
| 1f02fe57-d844-4fd8-a325-646f27163c8b |      | fa:16:3e:3f:a3:d4 | {“subnet_id”: “2e838119-3e2e-46e8-b7cc-6d00975046f2”, “ip_address”: “10.0.0.6”} |
+————————————–+——+——————-+———————————————————————————+

[root@dallas1 ~(keystone_boris)]$ neutron floatingip-associate  9498ac85-82b0-468a-b526-64a659080ab9 1f02fe57-d844-4fd8-a325-646f27163c8b

Associated floatingip 9498ac85-82b0-468a-b526-64a659080ab9

[root@dallas1 ~(keystone_boris)]$ ping 192.168.1.104

PING 192.168.1.104 (192.168.1.104) 56(84) bytes of data.
64 bytes from 192.168.1.104: icmp_seq=1 ttl=63 time=2.35 ms
64 bytes from 192.168.1.104: icmp_seq=2 ttl=63 time=2.56 ms
64 bytes from 192.168.1.104: icmp_seq=3 ttl=63 time=1.17 ms
64 bytes from 192.168.1.104: icmp_seq=4 ttl=63 time=4.08 ms
64 bytes from 192.168.1.104: icmp_seq=5 ttl=63 time=2.19 ms
^C


Up to date procedure of Creating cinder’s ThinLVM based Cloud Instance F20,Ubuntu 13.10 on Fedora 20 Havana Compute Node.

March 4, 2014

  This post follows up  https://bderzhavets.wordpress.com/2014/01/24/setting-up-two-physical-node-openstack-rdo-havana-neutron-gre-on-fedora-20-boxes-with-both-controller-and-compute-nodes-each-one-having-one-ethernet-adapter/

   Per my experience, `cinder create --image-id <Image_id> --display_name ...` followed by `nova boot --flavor 2 --user-data=./myfile.txt --block_device_mapping vda=<Volume_id>:::0 <VM_NAME>` no longer works, giving an error:

$ tail -f /var/log/nova/compute.log  reports :-

 2014-03-03 13:28:43.646 1344 WARNING nova.virt.libvirt.driver [req-1bd6630e-b799-4d78-b702-f06da5f1464b df4a984ce2f24848a6b84aaa99e296f1 e896be65e94a4893b870bc29b a86d7eb] [instance: f621815f-3805-4f52-a878-9040c6a4af53] File injection into a boot from volume instance is not supported

Followed by python stack trace and Nova Exception

A workaround for this issue follows below. First, stop and then start the "tgtd" daemon :-

[root@dallas1 ~(keystone_admin)]$ service tgtd stop
Redirecting to /bin/systemctl stop  tgtd.service
[root@dallas1 ~(keystone_admin)]$ service tgtd status
Redirecting to /bin/systemctl status  tgtd.service
tgtd.service – tgtd iSCSI target daemon
Loaded: loaded (/usr/lib/systemd/system/tgtd.service; enabled)
Active: inactive (dead) since Tue 2014-03-04 11:46:18 MSK; 8s ago
Process: 11978 ExecStop=/usr/sbin/tgtadm –op delete –mode system (code=exited, status=0/SUCCESS)
Process: 11974 ExecStop=/usr/sbin/tgt-admin –update ALL -c /dev/null (code=exited, status=0/SUCCESS)
Process: 11972 ExecStop=/usr/sbin/tgtadm –op update –mode sys –name State -v offline (code=exited, status=0/SUCCESS)
Process: 1797 ExecStartPost=/usr/sbin/tgtadm –op update –mode sys –name State -v ready (code=exited, status=0/SUCCESS)
Process: 1791 ExecStartPost=/usr/sbin/tgt-admin -e -c $TGTD_CONFIG (code=exited, status=0/SUCCESS)
Process: 1790 ExecStartPost=/usr/sbin/tgtadm –op update –mode sys –name State -v offline (code=exited, status=0/SUCCESS)
Process: 1173 ExecStartPost=/bin/sleep 5 (code=exited, status=0/SUCCESS)
Process: 1172 ExecStart=/usr/sbin/tgtd -f $TGTD_OPTS (code=exited, status=0/SUCCESS)
Main PID: 1172 (code=exited, status=0/SUCCESS)

Mar 04 11:14:04 dallas1.localdomain tgtd[1172]: tgtd: work_timer_start(146) use timer_fd based scheduler
Mar 04 11:14:04 dallas1.localdomain tgtd[1172]: tgtd: bs_init_signalfd(271) could not open backing-store module direct…store
Mar 04 11:14:04 dallas1.localdomain tgtd[1172]: tgtd: bs_init(390) use signalfd notification
Mar 04 11:14:09 dallas1.localdomain systemd[1]: Started tgtd iSCSI target daemon.
Mar 04 11:26:01 dallas1.localdomain tgtd[1172]: tgtd: device_mgmt(246) sz:69 params:path=/dev/cinder-volumes/volume-a0…2864d
Mar 04 11:26:01 dallas1.localdomain tgtd[1172]: tgtd: bs_thread_open(412) 16
Mar 04 11:33:32 dallas1.localdomain tgtd[1172]: tgtd: device_mgmt(246) sz:69 params:path=/dev/cinder-volumes/volume-01…f2969
Mar 04 11:33:32 dallas1.localdomain tgtd[1172]: tgtd: bs_thread_open(412) 16
Mar 04 11:46:18 dallas1.localdomain systemd[1]: Stopping tgtd iSCSI target daemon…
Mar 04 11:46:18 dallas1.localdomain systemd[1]: Stopped tgtd iSCSI target daemon.
Hint: Some lines were ellipsized, use -l to show in full.

[root@dallas1 ~(keystone_admin)]$ service tgtd start
Redirecting to /bin/systemctl start  tgtd.service
[root@dallas1 ~(keystone_admin)]$ service tgtd status -l
Redirecting to /bin/systemctl status  -l tgtd.service
tgtd.service – tgtd iSCSI target daemon
Loaded: loaded (/usr/lib/systemd/system/tgtd.service; enabled)
Active: active (running) since Tue 2014-03-04 11:46:40 MSK; 4s ago
Process: 11978 ExecStop=/usr/sbin/tgtadm –op delete –mode system (code=exited, status=0/SUCCESS)
Process: 11974 ExecStop=/usr/sbin/tgt-admin –update ALL -c /dev/null (code=exited, status=0/SUCCESS)
Process: 11972 ExecStop=/usr/sbin/tgtadm –op update –mode sys –name State -v offline (code=exited, status=0/SUCCESS)
Process: 12084 ExecStartPost=/usr/sbin/tgtadm –op update –mode sys –name State -v ready (code=exited, status=0/SUCCESS)
Process: 12078 ExecStartPost=/usr/sbin/tgt-admin -e -c $TGTD_CONFIG (code=exited, status=0/SUCCESS)
Process: 12076 ExecStartPost=/usr/sbin/tgtadm –op update –mode sys –name State -v offline (code=exited, status=0/SUCCESS)
Process: 12052 ExecStartPost=/bin/sleep 5 (code=exited, status=0/SUCCESS)
Main PID: 12051 (tgtd)
CGroup: /system.slice/tgtd.service
└─12051 /usr/sbin/tgtd -f

Mar 04 11:46:35 dallas1.localdomain systemd[1]: Starting tgtd iSCSI target daemon…
Mar 04 11:46:35 dallas1.localdomain tgtd[12051]: librdmacm: Warning: couldn’t read ABI version.
Mar 04 11:46:35 dallas1.localdomain tgtd[12051]: librdmacm: Warning: assuming: 4
Mar 04 11:46:35 dallas1.localdomain tgtd[12051]: librdmacm: Fatal: unable to get RDMA device list
Mar 04 11:46:35 dallas1.localdomain tgtd[12051]: tgtd: iser_ib_init(3351) Failed to initialize RDMA; load kernel modules?
Mar 04 11:46:35 dallas1.localdomain tgtd[12051]: tgtd: work_timer_start(146) use timer_fd based scheduler
Mar 04 11:46:35 dallas1.localdomain tgtd[12051]: tgtd: bs_init_signalfd(271) could not open backing-store module directory /usr/lib64/tgt/backing-store
Mar 04 11:46:35 dallas1.localdomain tgtd[12051]: tgtd: bs_init(390) use signalfd notification
Mar 04 11:46:40 dallas1.localdomain systemd[1]: Started tgtd iSCSI target daemon.
[root@dallas1 ~(keystone_admin)]$ for i in api scheduler volume ; do service openstack-cinder-${i} restart ;done
Redirecting to /bin/systemctl restart  openstack-cinder-api.service
Redirecting to /bin/systemctl restart  openstack-cinder-scheduler.service
Redirecting to /bin/systemctl restart  openstack-cinder-volume.service
[root@dallas1 ~(keystone_Boris)]$ glance image-list

+————————————–+———————+————-+——————+———–+——–+
| ID                                   | Name                | Disk Format | Container Format | Size      | Status |
+————————————–+———————+————-+——————+———–+——–+
| 592faef8-308a-4438-867a-17adf685cde4 | CirrOS 31           | qcow2       | bare             | 13147648  | active |
| d0e90250-5814-4685-9b8d-65ec9daa7117 | Fedora 20 x86_64    | qcow2       | bare             | 214106112 | active |
| 3e6eea8e-32e6-4373-9eb1-e04b8a3167f9 | Ubuntu Server 13.10 | qcow2       | bare             | 244777472 | active |
+————————————–+———————+————-+——————+———–+——–+

Create a thin LVM-backed instance via Nova, with login "fedora" and password "mysecret" set through user-data, in one command:

[root@dallas1 ~(keystone_boris)]$ glance image-list

+————————————–+———————+————-+——————+———–+——–+
| ID                                   | Name                | Disk Format | Container Format | Size      | Status |
+————————————–+———————+————-+——————+———–+——–+
| 592faef8-308a-4438-867a-17adf685cde4 | CirrOS 31           | qcow2       | bare             | 13147648  | active |
| d0e90250-5814-4685-9b8d-65ec9daa7117 | Fedora 20 x86_64    | qcow2       | bare             | 214106112 | active |
| 3e6eea8e-32e6-4373-9eb1-e04b8a3167f9 | Ubuntu Server 13.10 | qcow2       | bare             | 244777472 | active |
+————————————–+———————+————-+——————+———–+——–+
[root@dallas1 ~(keystone_boris)]$ nova boot –flavor 2 –user-data=./myfile.txt –block-device source=image,id=d0e90250-5814-4685-9b8d-65ec9daa7117,dest=volume,size=5,shutdown=preserve,bootindex=0 VF20RS01

+————————————–+————————————————-+
| Property                             | Value                                           |
+————————————–+————————————————-+
| status                               | BUILD                                           |
| updated                              | 2014-03-07T05:50:18Z                            |
| OS-EXT-STS:task_state                | scheduling                                      |
| key_name                             | None                                            |
| image                                | Attempt to boot from volume – no image supplied |
| hostId                               |                                                 |
| OS-EXT-STS:vm_state                  | building                                        |
| OS-SRV-USG:launched_at               | None                                            |
| flavor                               | m1.small                                        |
| id                                   | 770e33f7-7aab-49f1-95ca-3cf343f744ef            |
| security_groups                      | [{u’name’: u’default’}]                         |
| OS-SRV-USG:terminated_at             | None                                            |
| user_id                              | df4a984ce2f24848a6b84aaa99e296f1                |
| name                                 | VF20RS01                                        |
| adminPass                            | CqjGVUm9bbs9                                    |
| tenant_id                            | e896be65e94a4893b870bc29ba86d7eb                |
| created                              | 2014-03-07T05:50:18Z                            |
| OS-DCF:diskConfig                    | MANUAL                                          |
| metadata                             | {}                                              |
| os-extended-volumes:volumes_attached | []                                              |
| accessIPv4                           |                                                 |
| accessIPv6                           |                                                 |
| progress                             | 0                                               |
| OS-EXT-STS:power_state               | 0                                               |
| OS-EXT-AZ:availability_zone          | nova                                            |
| config_drive                         |                                                 |
+————————————–+————————————————-+

[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+———-+——–+———————-+————-+———-+
| ID                                   | Name     | Status | Task State           | Power State | Networks |
+————————————–+———-+——–+———————-+————-+———-+
| 770e33f7-7aab-49f1-95ca-3cf343f744ef | VF20RS01 | BUILD  | block_device_mapping | NOSTATE     |          |
+————————————–+———-+——–+———————-+————-+———-+

[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+———-+——–+———————-+————-+———-+
| ID                                   | Name     | Status | Task State           | Power State | Networks |
+————————————–+———-+——–+———————-+————-+———-+
| 770e33f7-7aab-49f1-95ca-3cf343f744ef | VF20RS01 | BUILD  | block_device_mapping | NOSTATE     |          |
+————————————–+———-+——–+———————-+————-+———-+
[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+———-+——–+————+————-+————–+
| ID                                   | Name     | Status | Task State | Power State | Networks     |
+————————————–+———-+——–+————+————-+————–+
| 770e33f7-7aab-49f1-95ca-3cf343f744ef | VF20RS01 | ACTIVE | None       | Running     | int=10.0.0.2 |
+————————————–+———-+——–+————+————-+————–+

[root@dallas1 ~(keystone_boris)]$ neutron floatingip-create ext

Created a new floatingip:

+———————+————————————–+
| Field               | Value                                |
+———————+————————————–+
| fixed_ip_address    |                                      |
| floating_ip_address | 192.168.1.101                        |
| floating_network_id | 0ed406bf-3552-4036-9006-440f3e69618e |
| id                  | f7d9cd3f-e544-4f23-821d-0307ed4eb852 |
| port_id             |                                      |
| router_id           |                                      |
| tenant_id           | e896be65e94a4893b870bc29ba86d7eb     |
+———————+————————————–+

[root@dallas1 ~(keystone_boris)]$ neutron port-list –device-id 770e33f7-7aab-49f1-95ca-3cf343f744ef

+————————————–+——+——————-+———————————————————————————+
| id                                   | name | mac_address       | fixed_ips                                                                       |
+————————————–+——+——————-+———————————————————————————+
| 8b5f142e-ce99-40e0-bbbe-620b201c0323 |      | fa:16:3e:0d:c4:e6 | {“subnet_id”: “2e838119-3e2e-46e8-b7cc-6d00975046f2”, “ip_address”: “10.0.0.2”} |
+————————————–+——+——————-+———————————————————————————+

[root@dallas1 ~(keystone_boris)]$ neutron floatingip-associate f7d9cd3f-e544-4f23-821d-0307ed4eb852 8b5f142e-ce99-40e0-bbbe-620b201c0323
Associated floatingip f7d9cd3f-e544-4f23-821d-0307ed4eb852

[root@dallas1 ~(keystone_boris)]$ ping 192.168.1.101

PING 192.168.1.101 (192.168.1.101) 56(84) bytes of data.
64 bytes from 192.168.1.101: icmp_seq=1 ttl=63 time=7.75 ms
64 bytes from 192.168.1.101: icmp_seq=2 ttl=63 time=1.06 ms
64 bytes from 192.168.1.101: icmp_seq=3 ttl=63 time=1.27 ms
64 bytes from 192.168.1.101: icmp_seq=4 ttl=63 time=1.43 ms
64 bytes from 192.168.1.101: icmp_seq=5 ttl=63 time=1.80 ms
64 bytes from 192.168.1.101: icmp_seq=6 ttl=63 time=0.916 ms
64 bytes from 192.168.1.101: icmp_seq=7 ttl=63 time=0.919 ms
64 bytes from 192.168.1.101: icmp_seq=8 ttl=63 time=0.930 ms
64 bytes from 192.168.1.101: icmp_seq=9 ttl=63 time=0.977 ms
64 bytes from 192.168.1.101: icmp_seq=10 ttl=63 time=0.690 ms
^C

— 192.168.1.101 ping statistics —

10 packets transmitted, 10 received, 0% packet loss, time 9008ms

rtt min/avg/max/mdev = 0.690/1.776/7.753/2.015 ms

[root@dallas1 ~(keystone_boris)]$ glance image-list

+————————————–+———————+————-+——————+———–+——–+
| ID                                   | Name                | Disk Format | Container Format | Size      | Status |
+————————————–+———————+————-+——————+———–+——–+
| 592faef8-308a-4438-867a-17adf685cde4 | CirrOS 31           | qcow2       | bare             | 13147648  | active |
| d0e90250-5814-4685-9b8d-65ec9daa7117 | Fedora 20 x86_64    | qcow2       | bare             | 214106112 | active |
| 3e6eea8e-32e6-4373-9eb1-e04b8a3167f9 | Ubuntu Server 13.10 | qcow2       | bare             | 244777472 | active |
+————————————–+———————+————-+——————+———–+——–+

[root@dallas1 ~(keystone_boris)]$ nova boot –flavor 2 –user-data=./myfile.txt –block-device source=image,id=3e6eea8e-32e6-4373-9eb1-e04b8a3167f9,dest=volume,size=5,shutdown=preserve,bootindex=0 UbuntuRS01

+————————————–+————————————————-+
| Property                             | Value                                           |
+————————————–+————————————————-+
| status                               | BUILD                                           |
| updated                              | 2014-03-07T05:53:44Z                            |
| OS-EXT-STS:task_state                | scheduling                                      |
| key_name                             | None                                            |
| image                                | Attempt to boot from volume – no image supplied |
| hostId                               |                                                 |
| OS-EXT-STS:vm_state                  | building                                        |
| OS-SRV-USG:launched_at               | None                                            |
| flavor                               | m1.small                                        |
| id                                   | bfcb2120-942f-4d3f-a173-93f6076a4be8            |
| security_groups                      | [{u’name’: u’default’}]                         |
| OS-SRV-USG:terminated_at             | None                                            |
| user_id                              | df4a984ce2f24848a6b84aaa99e296f1                |
| name                                 | UbuntuRS01                                      |
| adminPass                            | bXND2XTsvuA4                                    |
| tenant_id                            | e896be65e94a4893b870bc29ba86d7eb                |
| created                              | 2014-03-07T05:53:44Z                            |
| OS-DCF:diskConfig                    | MANUAL                                          |
| metadata                             | {}                                              |
| os-extended-volumes:volumes_attached | []                                              |
| accessIPv4                           |                                                 |
| accessIPv6                           |                                                 |
| progress                             | 0                                               |
| OS-EXT-STS:power_state               | 0                                               |
| OS-EXT-AZ:availability_zone          | nova                                            |
| config_drive                         |                                                 |
+————————————–+————————————————-+

[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+————+——–+———————-+————-+—————————–+
| ID                                   | Name       | Status | Task State           | Power State | Networks                    |
+————————————–+————+——–+———————-+————-+—————————–+
| bfcb2120-942f-4d3f-a173-93f6076a4be8 | UbuntuRS01 | BUILD  | block_device_mapping | NOSTATE     |                             |
| 770e33f7-7aab-49f1-95ca-3cf343f744ef | VF20RS01   | ACTIVE | None                 | Running     | int=10.0.0.2, 192.168.1.101 |
+————————————–+————+——–+———————-+————-+—————————–+

[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+————+——–+————+————-+—————————–+
| ID                                   | Name       | Status | Task State | Power State | Networks                    |
+————————————–+————+——–+————+————-+—————————–+
| bfcb2120-942f-4d3f-a173-93f6076a4be8 | UbuntuRS01 | ACTIVE | None       | Running     | int=10.0.0.4                |
| 770e33f7-7aab-49f1-95ca-3cf343f744ef | VF20RS01   | ACTIVE | None       | Running     | int=10.0.0.2, 192.168.1.101 |
+————————————–+————+——–+————+————-+—————————–+

[root@dallas1 ~(keystone_boris)]$ neutron floatingip-create ext

Created a new floatingip:

+———————+————————————–+

| Field               | Value                                |

+———————+————————————–+
| fixed_ip_address    |                                      |
| floating_ip_address | 192.168.1.102                        |
| floating_network_id | 0ed406bf-3552-4036-9006-440f3e69618e |
| id                  | b3d3f262-5142-4a99-9b8d-431c231cb1d7 |
| port_id             |                                      |
| router_id           |                                      |
| tenant_id           | e896be65e94a4893b870bc29ba86d7eb     |
+———————+————————————–+

[root@dallas1 ~(keystone_boris)]$ neutron port-list –device-id bfcb2120-942f-4d3f-a173-93f6076a4be8

+————————————–+——+——————-+———————————————————————————+

| id                                   | name | mac_address       | fixed_ips                                                                       |

+————————————–+——+——————-+———————————————————————————+
| c81ca027-8f9b-49c3-af10-adc60f5d4d12 |      | fa:16:3e:ac:86:50 | {“subnet_id”: “2e838119-3e2e-46e8-b7cc-6d00975046f2”, “ip_address”: “10.0.0.4”} |
+————————————–+——+——————-+———————————————————————————+

[root@dallas1 ~(keystone_boris)]$ neutron floatingip-associate b3d3f262-5142-4a99-9b8d-431c231cb1d7 c81ca027-8f9b-49c3-af10-adc60f5d4d12

Associated floatingip b3d3f262-5142-4a99-9b8d-431c231cb1d7

[root@dallas1 ~(keystone_boris)]$ ping 192.168.1.102

PING 192.168.1.102 (192.168.1.102) 56(84) bytes of data.
64 bytes from 192.168.1.102: icmp_seq=1 ttl=63 time=3.84 ms
64 bytes from 192.168.1.102: icmp_seq=2 ttl=63 time=3.06 ms
64 bytes from 192.168.1.102: icmp_seq=3 ttl=63 time=6.58 ms
64 bytes from 192.168.1.102: icmp_seq=4 ttl=63 time=7.98 ms
64 bytes from 192.168.1.102: icmp_seq=5 ttl=63 time=2.09 ms
64 bytes from 192.168.1.102: icmp_seq=6 ttl=63 time=1.06 ms
64 bytes from 192.168.1.102: icmp_seq=7 ttl=63 time=3.55 ms
64 bytes from 192.168.1.102: icmp_seq=8 ttl=63 time=2.01 ms
64 bytes from 192.168.1.102: icmp_seq=9 ttl=63 time=1.05 ms
64 bytes from 192.168.1.102: icmp_seq=10 ttl=63 time=3.45 ms
64 bytes from 192.168.1.102: icmp_seq=11 ttl=63 time=2.31 ms
64 bytes from 192.168.1.102: icmp_seq=12 ttl=63 time=0.977 ms
^C

— 192.168.1.102 ping statistics —

12 packets transmitted, 12 received, 0% packet loss, time 11014ms

rtt min/avg/max/mdev = 0.977/3.168/7.985/2.091 ms

[root@dallas1 ~(keystone_boris)]$ nova boot –flavor 2 –user-data=./myfile.txt –block-device source=image,id=d0e90250-5814-4685-9b8d-65ec9daa7117,dest=volume,size=5,shutdown=preserve,bootindex=0 VF20GLX

+————————————–+————————————————-+

| Property                             | Value                                           |

+————————————–+————————————————-+
| status                               | BUILD                                           |
| updated                              | 2014-03-07T05:58:40Z                            |
| OS-EXT-STS:task_state                | scheduling                                      |
| key_name                             | None                                            |
| image                                | Attempt to boot from volume – no image supplied |
| hostId                               |                                                 |
| OS-EXT-STS:vm_state                  | building                                        |
| OS-SRV-USG:launched_at               | None                                            |
| flavor                               | m1.small                                        |
| id                                   | 62ff1641-2c96-470f-9147-9272d68d2e5c            |
| security_groups                      | [{u’name’: u’default’}]                         |
| OS-SRV-USG:terminated_at             | None                                            |
| user_id                              | df4a984ce2f24848a6b84aaa99e296f1                |
| name                                 | VF20GLX                                         |
| adminPass                            | E9KXeLp8fWig                                    |
| tenant_id                            | e896be65e94a4893b870bc29ba86d7eb                |
| created                              | 2014-03-07T05:58:40Z                            |
| OS-DCF:diskConfig                    | MANUAL                                          |
| metadata                             | {}                                              |
| os-extended-volumes:volumes_attached | []                                              |
| accessIPv4                           |                                                 |
| accessIPv6                           |                                                 |
| progress                             | 0                                               |
| OS-EXT-STS:power_state               | 0                                               |
| OS-EXT-AZ:availability_zone          | nova                                            |
| config_drive                         |                                                 |
+————————————–+————————————————-+

[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+————+——–+———————-+————-+—————————–+
| ID                                   | Name       | Status | Task State           | Power State | Networks                    |
+————————————–+————+——–+———————-+————-+—————————–+
| bfcb2120-942f-4d3f-a173-93f6076a4be8 | UbuntuRS01 | ACTIVE | None                 | Running     | int=10.0.0.4, 192.168.1.102 |
| 62ff1641-2c96-470f-9147-9272d68d2e5c | VF20GLX    | BUILD  | block_device_mapping | NOSTATE     |                             |
| 770e33f7-7aab-49f1-95ca-3cf343f744ef | VF20RS01   | ACTIVE | None                 | Running     | int=10.0.0.2, 192.168.1.101 |
+————————————–+————+——–+———————-+————-+—————————–+

[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+————+——–+———————-+————-+—————————–+
| ID                                   | Name       | Status | Task State           | Power State | Networks                    |
+————————————–+————+——–+———————-+————-+—————————–+
| bfcb2120-942f-4d3f-a173-93f6076a4be8 | UbuntuRS01 | ACTIVE | None                 | Running     | int=10.0.0.4, 192.168.1.102 |
| 62ff1641-2c96-470f-9147-9272d68d2e5c | VF20GLX    | BUILD  | block_device_mapping | NOSTATE     |                             |
| 770e33f7-7aab-49f1-95ca-3cf343f744ef | VF20RS01   | ACTIVE | None                 | Running     | int=10.0.0.2, 192.168.1.101 |
+————————————–+————+——–+———————-+————-+—————————–+

[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+————+——–+———————-+————-+—————————–+
| ID                                   | Name       | Status | Task State           | Power State | Networks                    |
+————————————–+————+——–+———————-+————-+—————————–+
| bfcb2120-942f-4d3f-a173-93f6076a4be8 | UbuntuRS01 | ACTIVE | None                 | Running     | int=10.0.0.4, 192.168.1.102 |
| 62ff1641-2c96-470f-9147-9272d68d2e5c | VF20GLX    | BUILD  | block_device_mapping | NOSTATE     |                             |
| 770e33f7-7aab-49f1-95ca-3cf343f744ef | VF20RS01   | ACTIVE | None                 | Running     | int=10.0.0.2, 192.168.1.101 |
+————————————–+————+——–+———————-+————-+—————————–+

[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+————+——–+———————-+————-+—————————–+
| ID                                   | Name       | Status | Task State           | Power State | Networks                    |
+————————————–+————+——–+———————-+————-+—————————–+
| bfcb2120-942f-4d3f-a173-93f6076a4be8 | UbuntuRS01 | ACTIVE | None                 | Running     | int=10.0.0.4, 192.168.1.102 |
| 62ff1641-2c96-470f-9147-9272d68d2e5c | VF20GLX    | BUILD  | block_device_mapping | NOSTATE     |                             |
| 770e33f7-7aab-49f1-95ca-3cf343f744ef | VF20RS01   | ACTIVE | None                 | Running     | int=10.0.0.2, 192.168.1.101 |
+————————————–+————+——–+———————-+————-+—————————–+

[root@dallas1 ~(keystone_boris)]$ nova list

+————————————–+————+——–+————+————-+—————————–+

| ID                                   | Name       | Status | Task State | Power State | Networks                    |

+————————————–+————+——–+————+————-+—————————–+
| bfcb2120-942f-4d3f-a173-93f6076a4be8 | UbuntuRS01 | ACTIVE | None       | Running     | int=10.0.0.4, 192.168.1.102 |
| 62ff1641-2c96-470f-9147-9272d68d2e5c | VF20GLX    | ACTIVE | None       | Running     | int=10.0.0.5                |
| 770e33f7-7aab-49f1-95ca-3cf343f744ef | VF20RS01   | ACTIVE | None       | Running     | int=10.0.0.2, 192.168.1.101 |
+————————————–+————+——–+————+————-+—————————–+

[root@dallas1 ~(keystone_boris)]$ neutron floatingip-create ext

Created a new floatingip:

+———————+————————————–+
| Field               | Value                                |
+———————+————————————–+

| fixed_ip_address    |                                      |
| floating_ip_address | 192.168.1.103                        |
| floating_network_id | 0ed406bf-3552-4036-9006-440f3e69618e |
| id                  | 3fb87bb2-f485-4f1c-b2b7-7c5d90588d27 |
| port_id             |                                      |
| router_id           |                                      |
| tenant_id           | e896be65e94a4893b870bc29ba86d7eb     |
+———————+————————————–+

[root@dallas1 ~(keystone_boris)]$ neutron port-list –device-id 62ff1641-2c96-470f-9147-9272d68d2e5c

+————————————–+——+——————-+———————————————————————————+
| id                                   | name | mac_address       | fixed_ips                                                                       |
+————————————–+——+——————-+———————————————————————————+
| 0845ad30-4d2c-487d-8847-2b6e3e8b9b9d |      | fa:16:3e:2c:84:62 | {“subnet_id”: “2e838119-3e2e-46e8-b7cc-6d00975046f2”, “ip_address”: “10.0.0.5”} |
+————————————–+——+——————-+———————————————————————————+

[root@dallas1 ~(keystone_boris)]$ neutron floatingip-associate 3fb87bb2-f485-4f1c-b2b7-7c5d90588d27 0845ad30-4d2c-487d-8847-2b6e3e8b9b9d

Associated floatingip 3fb87bb2-f485-4f1c-b2b7-7c5d90588d27

[root@dallas1 ~(keystone_boris)]$ ping 192.168.1.103

PING 192.168.1.103 (192.168.1.103) 56(84) bytes of data.
64 bytes from 192.168.1.103: icmp_seq=1 ttl=63 time=4.08 ms
64 bytes from 192.168.1.103: icmp_seq=2 ttl=63 time=1.59 ms
64 bytes from 192.168.1.103: icmp_seq=3 ttl=63 time=1.22 ms
64 bytes from 192.168.1.103: icmp_seq=4 ttl=63 time=1.49 ms
64 bytes from 192.168.1.103: icmp_seq=5 ttl=63 time=1.11 ms
64 bytes from 192.168.1.103: icmp_seq=6 ttl=63 time=0.980 ms
64 bytes from 192.168.1.103: icmp_seq=7 ttl=63 time=6.71 ms
^C

— 192.168.1.103 ping statistics —

7 packets transmitted, 7 received, 0% packet loss, time 6007ms

rtt min/avg/max/mdev = 0.980/2.458/6.711/1.996 ms

[root@dallas1 ~(keystone_boris)]$ nova list
+————————————–+————+——–+————+————-+—————————–+
| ID                                   | Name       | Status | Task State | Power State | Networks                    |
+————————————–+————+——–+————+————-+—————————–+
| bfcb2120-942f-4d3f-a173-93f6076a4be8 | UbuntuRS01 | ACTIVE | None       | Running     | int=10.0.0.4, 192.168.1.102 |
| 62ff1641-2c96-470f-9147-9272d68d2e5c | VF20GLX    | ACTIVE | None       | Running     | int=10.0.0.5, 192.168.1.103 |
| 770e33f7-7aab-49f1-95ca-3cf343f744ef | VF20RS01   | ACTIVE | None       | Running     | int=10.0.0.2, 192.168.1.101 |
+————————————–+————+——–+————+————-+—————————–+

[root@dallas1 ~(keystone_admin)]$  vgdisplay
….

— Volume group —
VG Name               cinder-volumes
System ID
Format                lvm2
Metadata Areas        1
Metadata Sequence No  66
VG Access             read/write
VG Status             resizable
MAX LV                0
Cur LV                3
Open LV               3
Max PV                0
Cur PV                1
Act PV                1
VG Size               20.00 GiB
PE Size               4.00 MiB
Total PE              5119
Alloc PE / Size       3840 / 15.00 GiB
Free  PE / Size       1279 / 5.00 GiB
VG UUID               M11ikP-i6sd-ftwG-3XIH-F9wt-cSHe-m9kCtU


….

Three volumes have been created, each one 5 GB in size.

 [root@dallas1 ~(keystone_admin)]$ losetup -a

/dev/loop0: [64768]:14 (/cinder-volumes)

The same warning messages appear in the log, but now it works:

2014-03-03 23:50:19.851 6729 WARNING nova.virt.libvirt.driver [req-98443a14-3c3f-49f5-bf21-c183531a1778 df4a984ce2f24848a6b84aaa99e296f1 e896be65e94a4893b870bc29ba86d7eb] [instance: baffc298-3b45-4e01-8891-1e6510e3dc0e] File injection into a boot from volume instance is not supported

2014-03-03 23:50:21.439 6729 WARNING nova.virt.libvirt.volume [req-98443a14-3c3f-49f5-bf21-c183531a1778 df4a984ce2f24848a6b84aaa99e296f1 e896be65e94a4893b870bc29ba86d7eb] ISCSI volume not yet found at: vda. Will rescan & retry.  Try number: 0

2014-03-03 23:50:21.518 6729 WARNING nova.virt.libvirt.vif [req-98443a14-3c3f-49f5-bf21-c183531a1778 df4a984ce2f24848a6b84aaa99e296f1 e896be65e94a4893b870bc29ba86d7eb] Deprecated: The LibvirtHybridOVSBridgeDriver VIF driver is now deprecated and will be removed in the next release. Please use the LibvirtGenericVIFDriver VIF driver, together with a network plugin that reports the ‘vif_type’ attribute

2014-03-03 23:52:12.020 6729 WARNING nova.virt.libvirt.driver [req-1ea0e44e-b651-4f79-9d83-1ba872534440 df4a984ce2f24848a6b84aaa99e296f1 e896be65e94a4893b870bc29ba86d7eb] [instance: a64a7a24-ff8a-4d01-aa59-80393a4213df] File injection into a boot from volume instance is not supported

2014-03-03 23:52:13.629 6729 WARNING nova.virt.libvirt.volume [req-1ea0e44e-b651-4f79-9d83-1ba872534440 df4a984ce2f24848a6b84aaa99e296f1 e896be65e94a4893b870bc29ba86d7eb] ISCSI volume not yet found at: vda. Will rescan & retry.  Try number: 0

2014-03-03 23:52:13.709 6729 WARNING nova.virt.libvirt.vif [req-1ea0e44e-b651-4f79-9d83-1ba872534440 df4a984ce2f24848a6b84aaa99e296f1 e896be65e94a4893b870bc29ba86d7eb] Deprecated: The LibvirtHybridOVSBridgeDriver VIF driver is now deprecated and will be removed in the next release. Please use the LibvirtGenericVIFDriver VIF driver, together with a network plugin that reports the ‘vif_type’ attribute

2014-03-03 23:56:11.127 6729 WARNING nova.compute.manager [-] Found 4 in the database and 1 on the hypervisor.