"Setup on Fedora 20 " - Views: 1,784 · Hits: 1,784 - Type: Public

MariaDB setup :-

[root@dfw02 ]# yum -y install mariadb-server
[root@dfw02 ]# chown mysql:mysql /var/log/mariadb/*.log
[root@dfw02 ]# systemctl enable mariadb
[root@dfw02 ]# systemctl start mariadb
[root@dfw02 ]# mysql -u root

MariaDB [(none)]> UPDATE mysql.user SET Password = PASSWORD('password')
    ->     WHERE User = 'root';
MariaDB [(none)]>  FLUSH PRIVILEGES;

MariaDB [(none)]> SELECT User, Host, Password FROM mysql.user;
+----------+-------------------+-------------------------------------------+
| User     | Host              | Password                                  |
+----------+-------------------+-------------------------------------------+
| root     | localhost         | *E0DC09146F1310B49A34199B04274A9EED6F9EC7 |
| root     | dfw02.localdomain | *E0DC09146F1310B49A34199B04274A9EED6F9EC7 |
| root     | 127.0.0.1         | *E0DC09146F1310B49A34199B04274A9EED6F9EC7 |
| root     | ::1               | *E0DC09146F1310B49A34199B04274A9EED6F9EC7 |
+----------+-------------------+-------------------------------------------+
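
A quick sanity check that the new root password took effect
('password' being the value set above):

  # Should prompt for the password and print the server version
  $ mysql -u root -p -e "SELECT VERSION();"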


== Initial package installation ==

On the controller node (192.168.1.127):

  $ yum install openstack-keystone openstack-utils dnsmasq-utils -y
  $ yum install openstack-glance openstack-cinder openstack-neutron \
    openstack-neutron-openvswitch -y
  $ yum install openstack-nova -y

On the compute node (192.168.1.137):

  $ yum install openstack-neutron openstack-neutron-openvswitch \
    openstack-nova bridge-utils -y

For iptables (on both Controller & Compute nodes):

  $ systemctl stop firewalld
  $ systemctl disable firewalld
  $ yum install iptables-services
  # Save the current ruleset -- this creates /etc/sysconfig/iptables;
  # without that file, starting iptables will fail
  $ service iptables save
  $ systemctl enable iptables
  $ systemctl start iptables
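
To verify the switch-over:

  # The saved ruleset should exist and the service should be active
  $ ls -l /etc/sysconfig/iptables
  $ systemctl is-active iptables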


== Controller ==

Keystone
---------

  $ openstack-db --init --service keystone

  $ export SERVICE_TOKEN=$(openssl rand -hex 10)

  $ echo $SERVICE_TOKEN
  38b5a7143cdfd7063f42

  $ export SERVICE_ENDPOINT=http://192.168.1.127:35357/v2.0

  $ echo $SERVICE_TOKEN > /tmp/ks_admin_token

  $ openstack-config --set /etc/keystone/keystone.conf \
    DEFAULT admin_token $SERVICE_TOKEN

  $ keystone-manage pki_setup --keystone-user keystone \
    --keystone-group keystone

  $ chown -R keystone:keystone /etc/keystone/ssl

  $ for i in start enable status; \
    do systemctl $i openstack-keystone; done
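
A quick check that Keystone answers on its admin port (it should
return a small JSON document describing the v2.0 API):

  $ curl http://192.168.1.127:35357/v2.0/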

  $ keystone service-create --name keystone --type identity \
    --description "Keystone Identity Service"
  +-------------+----------------------------------+
  |   Property  |              Value               |
  +-------------+----------------------------------+
  | description |    Keystone Identity Service     |
  |      id     | b608338394c8443f88d1bf22bd2029de |
  |     name    |             keystone             |
  |     type    |             identity             |
  +-------------+----------------------------------+

  $ keystone endpoint-create --service_id b608338394c8443f88d1bf22bd2029de \
        --publicurl 'http://192.168.1.127:5000/v2.0' \
        --adminurl 'http://192.168.1.127:35357/v2.0' \
        --internalurl 'http://192.168.1.127:5000/v2.0'
  +-------------+----------------------------------+
  |   Property  |              Value               |
  +-------------+----------------------------------+
  |   adminurl  | http://192.168.1.127:35357/v2.0  |
  |      id     | b6ec04d22c4e4dca867d9f0eb5908fca |
  | internalurl |  http://192.168.1.127:5000/v2.0  |
  |  publicurl  |  http://192.168.1.127:5000/v2.0  |
  |    region   |            regionOne             |
  |  service_id | b608338394c8443f88d1bf22bd2029de |
  +-------------+----------------------------------+
 
  $ keystone user-create --name admin --pass fedora
  +----------+----------------------------------+
  | Property |              Value               |
  +----------+----------------------------------+
  |  email   |                                  |
  | enabled  |               True               |
  |    id    | 076818c611d443238bc6ca45ad0021ac |
  |   name   |              admin               |
  +----------+----------------------------------+
  
  $ keystone role-create --name admin
  +----------+----------------------------------+
  | Property |              Value               |
  +----------+----------------------------------+
  |    id    | 90950360ac844ff598b9a75e269afbe1 |
  |   name   |              admin               |
  +----------+----------------------------------+
  
  $ keystone tenant-create --name admin
  +-------------+----------------------------------+
  |   Property  |              Value               |
  +-------------+----------------------------------+
  | description |                                  |
  |   enabled   |               True               |
  |      id     | 1580cadb173c475eb9c381e78f13b109 |
  |     name    |              admin               |
  +-------------+----------------------------------+

  $ keystone user-role-add --user admin \
    --role admin --tenant admin

  $ cat >> ~/keystonerc_admin <<EOF
  export OS_USERNAME=admin
  export OS_TENANT_NAME=admin
  export OS_PASSWORD=fedora
  export OS_AUTH_URL=http://192.168.1.127:35357/v2.0/
  export PS1='[\u@\h \W(keystone_admin)]\$ '
  EOF

  $ . keystonerc_admin

  $ keystone user-create --name kashyap --pass fedora
  WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
  +----------+----------------------------------+
  | Property |              Value               |
  +----------+----------------------------------+
  |  email   |                                  |
  | enabled  |               True               |
  |    id    | 1c18b2231aa34dbe9c31cd390aaedb42 |
  |   name   |             kashyap              |
  +----------+----------------------------------+
  
  $ keystone role-create --name user
  WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
  +----------+----------------------------------+
  | Property |              Value               |
  +----------+----------------------------------+
  |    id    | 6fac6b1cd0c24ba0a949d12acc757311 |
  |   name   |               user               |
  +----------+----------------------------------+
  
  $ keystone tenant-create --name ostenant
  WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
  +-------------+----------------------------------+
  |   Property  |              Value               |
  +-------------+----------------------------------+
  | description |                                  |
  |   enabled   |               True               |
  |      id     | 2c845a6ad20e45ccb0b045cee27a9661 |
  |     name    |             ostenant             |
  +-------------+----------------------------------+

  $ keystone user-role-add --user kashyap \
  --role user --tenant ostenant
  WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).

  $ cat >> ~/keystonerc_kashyap <<EOF
  export OS_USERNAME=kashyap
  export OS_TENANT_NAME=ostenant
  export OS_PASSWORD=fedora
  export OS_AUTH_URL=http://192.168.1.127:35357/v2.0/
  export PS1='[\u@\h \W(keystone_kashyap)]\$ '
  EOF

  # Logout and ssh into the controller node again
  $ . keystonerc_kashyap
  $ keystone user-list
  $ . keystonerc_admin
  $ keystone user-list

  # Disable qpid authentication
  $ yum install qpid-cpp-server -y
  $ sed -i 's/auth=.*/auth=no/g' /etc/qpidd.conf
  $ grep auth /etc/qpidd.conf 
  auth=no
  # Start and enable qpidd.service
  $ for i in start enable status; \
    do systemctl $i qpidd; done
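
Confirm the broker is listening on the AMQP port:

  $ ss -tnlp | grep 5672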

Glance
------

  $ openstack-db --init --service glance

  $ keystone tenant-create --name services
  +-------------+----------------------------------+
  |   Property  |              Value               |
  +-------------+----------------------------------+
  | description |                                  |
  |   enabled   |               True               |
  |      id     | a04e8158c9974f2699185994791e78c1 |
  |     name    |             services             |
  +-------------+----------------------------------+

  $ keystone user-create --name glance --pass fedora

  $ keystone user-role-add --user glance \
    --role admin --tenant services


  # Update glance-api.conf
  $ openstack-config --set /etc/glance/glance-api.conf \
    paste_deploy flavor keystone
  $ openstack-config --set /etc/glance/glance-api.conf \
    keystone_authtoken admin_tenant_name services
  $ openstack-config --set /etc/glance/glance-api.conf \
    keystone_authtoken admin_user glance
  $ openstack-config --set /etc/glance/glance-api.conf \
    keystone_authtoken admin_password fedora

  # Update glance-registry.conf
  $ openstack-config --set /etc/glance/glance-registry.conf \
    paste_deploy flavor keystone
  $ openstack-config --set /etc/glance/glance-registry.conf \
    keystone_authtoken admin_tenant_name services
  $ openstack-config --set /etc/glance/glance-registry.conf \
    keystone_authtoken admin_user glance
  $ openstack-config --set /etc/glance/glance-registry.conf \
    keystone_authtoken admin_password fedora

  # Start glance-registry service
  $ for i in start enable status; \
  do systemctl $i openstack-glance-registry; done

  # Start glance-api service
  $ for i in start enable status; \
  do systemctl $i openstack-glance-api; done


  $ keystone service-create --name glance --type image --description \
    "Glance Image Service"
  +-------------+----------------------------------+
  |   Property  |              Value               |
  +-------------+----------------------------------+
  | description |       Glance Image Service       |
  |      id     | 12462e8dbc924b6595a671dd8c974418 |
  |     name    |              glance              |
  |     type    |              image               |
  +-------------+----------------------------------+

  $ keystone endpoint-create --service_id 12462e8dbc924b6595a671dd8c974418 \
    --publicurl http://192.168.1.127:9292 \
    --adminurl http://192.168.1.127:9292 \
    --internalurl http://192.168.1.127:9292
  +-------------+----------------------------------+
  |   Property  |              Value               |
  +-------------+----------------------------------+
  |   adminurl  |    http://192.168.1.127:9292     |
  |      id     | 4fe449e667bb4c9b8b8073124e5bff7b |
  | internalurl |    http://192.168.1.127:9292     |
  |  publicurl  |    http://192.168.1.127:9292     |
  |    region   |            regionOne             |
  |  service_id | 12462e8dbc924b6595a671dd8c974418 |
  +-------------+----------------------------------+

  $ glance index
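
At this point the image store is empty. As an illustration (the
cirros URL below is an assumption -- any qcow2 cloud image will do),
an image can be registered like this:

  # Fetch a small test image and register it with Glance
  $ wget http://download.cirros-cloud.net/0.3.0/cirros-0.3.0-x86_64-disk.img
  $ glance image-create --name cirros --disk-format qcow2 \
    --container-format bare --is-public True \
    --file cirros-0.3.0-x86_64-disk.img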


Cinder
------

  $ keystone user-create --name cinder --pass fedora
  +----------+----------------------------------+
  | Property |              Value               |
  +----------+----------------------------------+
  |  email   |                                  |
  | enabled  |               True               |
  |    id    | c3b5ab209788481e9fc5e293f1964566 |
  |   name   |              cinder              |
  +----------+----------------------------------+

  $ keystone user-role-add --user cinder \
    --role admin --tenant services

  $ openstack-db --init --service cinder

  # Update cinder.conf with Keystone creds
  $ openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
  $ openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_tenant_name services
  $ openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_user cinder
  $ openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_password fedora

  $ openstack-config --set /etc/cinder/cinder.conf DEFAULT qpid_hostname 192.168.1.127
  $ openstack-config --set /etc/cinder/cinder.conf DEFAULT qpid_port 5672

  # Storage for Cinder
  $ dd if=/dev/zero of=/cinder-volumes bs=1 count=0 seek=5G
  $ losetup -fv /cinder-volumes
  $ losetup -l
  $ vgcreate cinder-volumes /dev/loop0
  $ vgdisplay cinder-volumes
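
Note that a loop device attached with losetup does not survive a
reboot. One way to re-attach it at boot (a sketch; adjust the path
to taste) is via rc.local:

  $ cat >> /etc/rc.d/rc.local <<EOF
  losetup -f /cinder-volumes
  EOF
  $ chmod +x /etc/rc.d/rc.local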

  # Update ISCSI conf and restart tgtd
  $ echo "include /etc/cinder/volumes/*" >> /etc/tgt/targets.conf

  $ for i in enable start status; \
  do systemctl $i tgtd.service; done


  # Start Cinder services
  $ for i in enable start status; \
    do systemctl $i openstack-cinder-api; done
  $ for i in enable start status; \
    do systemctl $i openstack-cinder-scheduler; done
  $ for i in enable start status; \
    do systemctl $i openstack-cinder-volume; done

  # Create Keystone entries
  $ keystone service-create --name cinder \
    --type volume --description "Cinder Volume Service"
  +-------------+----------------------------------+
  |   Property  |              Value               |
  +-------------+----------------------------------+
  | description |      Cinder Volume Service       |
  |      id     | 27cec0d0d0504080a3142fba28b348a8 |
  |     name    |              cinder              |
  |     type    |              volume              |
  +-------------+----------------------------------+

  $ keystone endpoint-create --service_id 27cec0d0d0504080a3142fba28b348a8 \
    --publicurl "http://192.168.1.127:8776/v1/\$(tenant_id)s" \
    --adminurl "http://192.168.1.127:8776/v1/\$(tenant_id)s" \
    --internalurl "http://192.168.1.127:8776/v1/\$(tenant_id)s"
  +-------------+--------------------------------------------+
  |   Property  |                   Value                    |
  +-------------+--------------------------------------------+
  |   adminurl  | http://192.168.1.127:8776/v1/$(tenant_id)s |
  |      id     |      bff693cc42644ad6b88669e66a7ca821      |
  | internalurl | http://192.168.1.127:8776/v1/$(tenant_id)s |
  |  publicurl  | http://192.168.1.127:8776/v1/$(tenant_id)s |
  |    region   |                 regionOne                  |
  |  service_id |      27cec0d0d0504080a3142fba28b348a8      |
  +-------------+--------------------------------------------+

  # Test
  $ cinder create --display-name testvol1 1
  $ cinder list
  $ lvs


Neutron 
-------

Neutron on Controller node
~~~~~~~~~~~~~~~~~~~~~~~~~~

Create the database entries

  $ mysql -u root -p
  Enter password: 
  Welcome to the MariaDB monitor.  Commands end with ; or \g.
  Your MariaDB connection id is 16
  Server version: 5.5.33a-MariaDB MariaDB Server
  
  Copyright (c) 2000, 2013, Oracle, Monty Program Ab and others.
  
  Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
  
  MariaDB [(none)]> CREATE DATABASE neutron;
  Query OK, 1 row affected (0.00 sec)
  
  MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost'
      -> IDENTIFIED BY 'NEUTRON_DBPASS';
  Query OK, 0 rows affected (0.00 sec)
  
  MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%'
      -> IDENTIFIED BY 'NEUTRON_DBPASS';
  Query OK, 0 rows affected (0.00 sec)
  
  MariaDB [(none)]> quit
  Bye
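
Verify the grant works (assuming the NEUTRON_DBPASS placeholder was
replaced with a real password):

  $ mysql -u neutron -pNEUTRON_DBPASS -e "SHOW DATABASES;"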


Start OVS service, and create an integration bridge

  $ systemctl start openvswitch.service
  $ systemctl enable openvswitch.service

  $ systemctl enable neutron-ovs-cleanup.service

  $ ovs-vsctl add-br br-int

  $ cat /etc/sysconfig/network-scripts/ifcfg-br-ex
  DEVICE="br-ex"
  BOOTPROTO="static"
  IPADDR="192.168.1.135"
  NETMASK="255.255.255.0"
  DNS1="83.221.202.254"
  BROADCAST="192.168.1.255"
  GATEWAY="192.168.1.1"
  NM_CONTROLLED="no"
  DEFROUTE="yes"
  IPV4_FAILURE_FATAL="yes"
  IPV6INIT=no
  ONBOOT="yes"
  TYPE="OVSBridge"
  DEVICETYPE="ovs"

  $ cat /etc/sysconfig/network-scripts/ifcfg-p37p1
  NAME="p37p1"
  HWADDR=90:E6:BA:2D:11:EB
  ONBOOT="yes"
  TYPE="OVSPort"
  DEVICETYPE="ovs"
  OVS_BRIDGE=br-ex
  NM_CONTROLLED=no
  IPV6INIT=no

  $ systemctl stop NetworkManager
  $ systemctl disable NetworkManager
  $ systemctl restart network
  $ systemctl status network   # must be running

 [root@dfw02 ~]# route -n
 Kernel IP routing table
 Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
 0.0.0.0         192.168.1.1     0.0.0.0         UG    0      0        0 br-ex
 169.254.0.0     0.0.0.0         255.255.0.0     U     1003   0        0 p37p1
 169.254.0.0     0.0.0.0         255.255.0.0     U     1010   0        0 br-ex
 192.168.1.0     0.0.0.0         255.255.255.0   U     0      0        0 br-ex
 192.168.122.0   0.0.0.0         255.255.255.0   U     0      0        0 virbr0

   
  $ neutron-server-setup
  Please select a plugin from: linuxbridge openvswitch
  Choice:
  openvswitch
  Quantum plugin: openvswitch
  Plugin: openvswitch => Database: ovs_neutron
  Redirecting to /bin/systemctl status  mysqld.service
  Please enter the password for the 'root' MySQL user:
  Verified connectivity to MySQL.
  Would you like to update the nova configuration files? (y/n):
  y
  Complete!


Update /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini to use
GRE tunneling:

  $ grep "\[ovs\]" -A7  ovs_neutron_plugin.ini
  [...]
  [ovs]
  tenant_network_type = gre
  tunnel_id_ranges = 1:1000
  enable_tunneling = True
  integration_bridge = br-int
  tunnel_bridge = br-tun
  local_ip = 192.168.1.127
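
The neutron-server service reads /etc/neutron/plugin.ini; if the
packaging did not already create it, symlink it to the OVS plugin
config (plugin.ini is referenced again further below):

  $ ln -s /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini \
    /etc/neutron/plugin.ini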


Configure neutron.conf

  $ openstack-config --set /etc/neutron/neutron.conf DEFAULT \
    core_plugin \
    neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2

  $ openstack-config --set /etc/neutron/neutron.conf DEFAULT \
    ovs_use_veth True

  $ openstack-config --set /etc/neutron/neutron.conf DEFAULT \
    allow_overlapping_ips True

  $ openstack-config --set /etc/neutron/neutron.conf DEFAULT \
    rpc_backend neutron.openstack.common.rpc.impl_qpid
    
  $ openstack-config --set /etc/neutron/neutron.conf DEFAULT \
    qpid_hostname 192.168.1.127
  
  $ openstack-config --set /etc/neutron/neutron.conf DEFAULT \
    qpid_port 5672


Make Neutron Keystone entries
  
  $ openstack-config --set /etc/neutron/neutron.conf \
    DEFAULT auth_strategy keystone
  $ openstack-config --set /etc/neutron/neutron.conf \
    keystone_authtoken auth_host 192.168.1.127
  $ openstack-config --set /etc/neutron/neutron.conf \
    keystone_authtoken admin_tenant_name services
  $ openstack-config --set /etc/neutron/neutron.conf \
    keystone_authtoken admin_user neutron
  $ openstack-config --set /etc/neutron/neutron.conf \
    keystone_authtoken admin_password fedora
  $ openstack-config --set /etc/neutron/neutron.conf \
    AGENT root_helper sudo neutron-rootwrap /etc/neutron/rootwrap.conf

Set the firewall driver

  $ openstack-config --set \
    /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini \
    SECURITYGROUP firewall_driver \
    neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

Ensure the SQL connection info in plugin.ini is correct (the syntax
is mysql://user:password@host/database):

  $ grep sql_connection plugin.ini 
  sql_connection = mysql://root:password@vm01-controller/ovs_neutron

Configure DHCP agent to use OVS

  $ openstack-config --set /etc/neutron/dhcp_agent.ini \
    DEFAULT interface_driver \
    neutron.agent.linux.interface.OVSInterfaceDriver

Handle internal-only routers

  $ openstack-config --set /etc/neutron/dhcp_agent.ini \
    DEFAULT handle_internal_only_routers TRUE

Handle external bridge network

  $ openstack-config --set /etc/neutron/dhcp_agent.ini \
    DEFAULT external_network_bridge br-ex

Use veth

  $ openstack-config --set /etc/neutron/dhcp_agent.ini \
    DEFAULT ovs_use_veth True

Use network namespaces

   $ openstack-config --set /etc/neutron/dhcp_agent.ini \
     DEFAULT use_namespaces True


DHCP agent configuration

  $ cat dhcp_agent.ini | grep -v ^$ | grep -v ^#
  [DEFAULT]
  interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
  handle_internal_only_routers = TRUE
  external_network_bridge = br-ex
  ovs_use_veth = True
  use_namespaces = True
  
L3 agent configuration   

  $ cat l3_agent.ini | grep -v ^$ | grep -v ^#
  [DEFAULT]
  interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
  handle_internal_only_routers = TRUE
  ovs_use_veth = True
  use_namespaces = True
  metadata_ip = 192.168.1.127
  metadata_port = 8700

Configure Identity records for Neutron

  $ keystone user-create --name=neutron --pass=fedora
  +----------+----------------------------------+
  | Property |              Value               |
  +----------+----------------------------------+
  |  email   |                                  |
  | enabled  |               True               |
  |    id    | d18d9504e4324e619b4c9482bdbbcf88 |
  |   name   |             neutron              |
  +----------+----------------------------------+
  
  $ keystone user-role-add --user neutron --role admin \
    --tenant services
  
  $ keystone service-create --name neutron --type network \
    --description "Neutron Network Service"
  +-------------+----------------------------------+
  |   Property  |              Value               |
  +-------------+----------------------------------+
  | description |     Neutron Network Service      |
  |      id     | 7aa4d0c833db478bad4efb8c0b1e0c3a |
  |     name    |             neutron              |
  |     type    |             network              |
  +-------------+----------------------------------+
  
  $ keystone endpoint-create --service_id 7aa4d0c833db478bad4efb8c0b1e0c3a \
    --publicurl "http://192.168.1.127:9696" \
    --adminurl "http://192.168.1.127:9696" \
    --internalurl "http://192.168.1.127:9696"
  +-------------+----------------------------------+
  |   Property  |              Value               |
  +-------------+----------------------------------+
  |   adminurl  |   http://192.168.1.127:9696    |
  |      id     | d09647c5ed774a258e73f8181fbc5e7f |
  | internalurl |   http://192.168.1.127:9696    |
  |  publicurl  |   http://192.168.1.127:9696    |
  |    region   |            regionOne             |
  |  service_id | 7aa4d0c833db478bad4efb8c0b1e0c3a |
  +-------------+----------------------------------+


Start and enable the Neutron services

  $ for i in start enable status; \
    do systemctl $i openvswitch; done
  $ for i in start enable status; \
    do systemctl $i neutron-server; done
  $ for i in start enable status; \
    do systemctl $i neutron-l3-agent; done
  $ for i in start enable status; \
    do systemctl $i neutron-dhcp-agent; done
  $ for i in start enable status; \
    do systemctl $i neutron-openvswitch-agent; done
  $ for i in start enable status; \
    do systemctl $i neutron-ovs-cleanup; done
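
If everything came up cleanly, the agents should report alive
(look for ':-)' in the alive column):

  $ neutron agent-list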

Display OVS bridges and ports

  $ ovs-vsctl show
  6f5d0e33-7013-4816-bc97-29af9abe8309
      Bridge br-int
          Port br-int
              Interface br-int
                  type: internal
          Port patch-tun
              Interface patch-tun
                  type: patch
                  options: {peer=patch-int}
      Bridge br-tun
          Port patch-int
              Interface patch-int
                  type: patch
                  options: {peer=patch-tun}
          Port br-tun
              Interface br-tun
                  type: internal
      Bridge br-ex
          Port "eth0"
              Interface "eth0"
          Port br-ex
              Interface br-ex
                  type: internal
      ovs_version: "2.0.0"


Neutron on Compute node
~~~~~~~~~~~~~~~~~~~~~~~

Disable NM and start network

  $ systemctl stop NetworkManager
  $ systemctl disable NetworkManager
  $ systemctl enable network  (or $ chkconfig network on)
  $ systemctl restart network
  $ systemctl status network

Start OpenvSwitch

  $ for i in start enable status; \
    do systemctl $i openvswitch; done


Create the integration bridge

  $ ovs-vsctl add-br br-int 

Copy the Neutron configs from Control node:

  $ scp root@192.168.1.127:/etc/neutron/neutron.conf \
    /etc/neutron/neutron.conf
  $ scp \
    root@192.168.1.127:/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini \
    /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini

Start Neutron services

  $ for i in start enable status; \
    do systemctl $i neutron-openvswitch-agent; done
  $ for i in start enable status; \
    do systemctl $i neutron-ovs-cleanup; done

Nova on Controller node
~~~~~~~~~~~~~~~~~~~~~~~

  $ yum install openstack-nova -y
  $ openstack-db --init --service nova

  $ keystone user-create --name nova --pass fedora
  +----------+----------------------------------+
  | Property |              Value               |
  +----------+----------------------------------+
  |  email   |                                  |
  | enabled  |               True               |
  |    id    | 3c2c8fe89d544617b69414448258ad4f |
  |   name   |               nova               |
  +----------+----------------------------------+

  $ keystone user-role-add --user nova \
    --role admin --tenant services

Populate nova.conf with:

  $ cat nova.conf | grep -v ^$ | grep -v ^#
  [DEFAULT]
  logdir = /var/log/nova
  state_path = /var/lib/nova
  lock_path = /var/lib/nova/tmp
  volumes_dir = /etc/nova/volumes
  dhcpbridge = /usr/bin/nova-dhcpbridge
  dhcpbridge_flagfile = /etc/nova/nova.conf
  force_dhcp_release = True
  injected_network_template = /usr/share/nova/interfaces.template
  libvirt_nonblocking = True
  libvirt_inject_partition = -1
  libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
  iscsi_helper = tgtadm
  sql_connection = mysql://nova:nova@192.168.1.127/nova
  compute_driver = libvirt.LibvirtDriver
  libvirt_type=qemu
  rpc_backend = nova.openstack.common.rpc.impl_qpid
  rootwrap_config = /etc/nova/rootwrap.conf
  auth_strategy = keystone
  firewall_driver=nova.virt.firewall.NoopFirewallDriver
  volume_api_class = nova.volume.cinder.API
  enabled_apis = ec2,osapi_compute,metadata
  my_ip=192.168.1.127
  qpid_hostname=192.168.1.127
  qpid_port=5672
  glance_host=192.168.1.127
  network_api_class = nova.network.neutronv2.api.API
  neutron_admin_username = neutron
  neutron_admin_password = fedora
  neutron_admin_auth_url = http://192.168.1.127:35357/v2.0/
  neutron_auth_strategy = keystone
  neutron_admin_tenant_name = services
  neutron_url = http://192.168.1.127:9696/
  security_group_api = neutron
  [keystone_authtoken]
  admin_tenant_name = services
  admin_user = nova
  admin_password = fedora
  auth_host = 192.168.1.127
  auth_port = 35357
  auth_protocol = http
  signing_dirname = /tmp/keystone-signing-nova

And ensure it has correct permissions

  $ chown root:nova /etc/nova/nova.conf
  $ restorecon /etc/nova/nova.conf
  $ chcon -u system_u -r object_r -t etc_t /etc/nova/nova.conf
  $ chmod 640 /etc/nova/nova.conf

Create the 'nova' user in mysql.user and set its password

shell> mysql -u root -p
mysql> insert into mysql.user (User,Host,Password) values ('nova','dfw02.localdomain',' ');
mysql> UPDATE mysql.user SET Password = PASSWORD('nova')
    ->     WHERE User = 'nova';
mysql> FLUSH PRIVILEGES;
  
Start, enable nova-{api,scheduler,conductor} services

  $ for i in start enable status; \
    do systemctl $i openstack-nova-api; done

  $ for i in start enable status; \
    do systemctl $i openstack-nova-scheduler; done

  $ for i in start enable status; \
    do systemctl $i openstack-nova-conductor; done

****************************************************************
Create Nova service endpoints on Controller node
****************************************************************

  $ keystone service-create --name nova --type compute \
    --description "Nova Compute Service"
  +-------------+----------------------------------+
  |   Property  |              Value               |
  +-------------+----------------------------------+
  | description |       Nova Compute Service       |
  |      id     | 8a6bce14fa914387b709e126cddc2a40 |
  |     name    |               nova               |
  |     type    |             compute              |
  +-------------+----------------------------------+

  $ keystone endpoint-create --service_id \
    8a6bce14fa914387b709e126cddc2a40 \
    --publicurl "http://192.168.1.127:8774/v1.1/\$(tenant_id)s" \
    --adminurl "http://192.168.1.127:8774/v1.1/\$(tenant_id)s" \
    --internalurl "http://192.168.1.127:8774/v1.1/\$(tenant_id)s"
  +-------------+------------------------------------------------+
  |   Property  |                     Value                      |
  +-------------+------------------------------------------------+
  |   adminurl  | http://192.168.1.127:8774/v1.1/$(tenant_id)s |
  |      id     |        54f567c6beee49ce97aa06b5637cfa07        |
  | internalurl | http://192.168.1.127:8774/v1.1/$(tenant_id)s |
  |  publicurl  | http://192.168.1.127:8774/v1.1/$(tenant_id)s |
  |    region   |                   regionOne                    |
  |  service_id |        8a6bce14fa914387b709e126cddc2a40        |
  +-------------+------------------------------------------------+

  $ nova-manage service list
  Binary           Host                                 Zone             Status     State Updated_At
  nova-scheduler   vm01-controller                      internal         enabled    :-)   2013-11-08 16:01:13
  nova-conductor   vm01-controller                      internal         enabled    :-)   2013-11-08 16:01:09
  nova-compute     vm02-compute                         nova             enabled    :-)   2013-11-08 16:01:06


****************************************************************
Configure metadata service on Controller node :-
****************************************************************

Update Neutron metadata_agent.ini to communicate with Keystone

  $ openstack-config --set /etc/neutron/metadata_agent.ini \
    DEFAULT auth_url http://192.168.1.127:35357/v2.0/
  $ openstack-config --set /etc/neutron/metadata_agent.ini \
    DEFAULT auth_region regionOne
  $ openstack-config --set /etc/neutron/metadata_agent.ini \
    DEFAULT admin_tenant_name services
  $ openstack-config --set /etc/neutron/metadata_agent.ini \
    DEFAULT admin_user neutron
  $ openstack-config --set /etc/neutron/metadata_agent.ini \
    DEFAULT admin_password fedora
  
Update Neutron metadata_agent.ini so it knows how to connect to Nova

  $ openstack-config --set /etc/neutron/metadata_agent.ini \
    DEFAULT nova_metadata_ip 192.168.1.127
  $ openstack-config --set /etc/neutron/metadata_agent.ini \
    DEFAULT nova_metadata_port 8700
  $ openstack-config --set /etc/neutron/metadata_agent.ini \
    DEFAULT metadata_proxy_shared_secret fedora
  
Update Neutron l3_agent.ini (the L3 agent sets up the routing).

  $  openstack-config --set /etc/neutron/l3_agent.ini \
     DEFAULT metadata_ip 192.168.1.127
  $  openstack-config --set /etc/neutron/l3_agent.ini \
     DEFAULT metadata_port 8700
  
Update nova.conf on Controller node to make sure it listens on this port for
metadata:

  $ openstack-config --set /etc/nova/nova.conf DEFAULT \
    metadata_host 192.168.1.127
  $ openstack-config --set /etc/nova/nova.conf DEFAULT \
    metadata_listen 0.0.0.0
  $ openstack-config --set /etc/nova/nova.conf DEFAULT \
    metadata_listen_port 8700
  $ openstack-config --set /etc/nova/nova.conf DEFAULT \
    service_neutron_metadata_proxy True
  $ openstack-config --set /etc/nova/nova.conf DEFAULT \
    neutron_metadata_proxy_shared_secret fedora
  
Start and Enable the services
  
  $ systemctl enable neutron-metadata-agent
  $ systemctl start neutron-metadata-agent
  
Restart Nova API and Neutron L3 agent

  $ systemctl restart openstack-nova-api
  $ systemctl restart neutron-l3-agent

All Neutron services should now be running; if any are not, double-check
the config files.
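
Once an instance is up (see the later sections), the metadata path
can be verified from inside the guest:

  # Run inside the guest; should list the metadata categories
  $ curl http://169.254.169.254/latest/meta-data/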

Nova on Compute node
~~~~~~~~~~~~~~~~~~~~

Enable libvirtd

  $ for i in start enable status; \
    do systemctl $i libvirtd; done

Copy nova.conf from the Controller node to the Compute node

  $ scp /etc/nova/nova.conf root@192.168.1.137:/etc/nova/nova.conf

Ensure it has the correct permissions

  $ chown root:nova /etc/nova/nova.conf
  $ restorecon /etc/nova/nova.conf
  $ chcon -u system_u -r object_r -t etc_t /etc/nova/nova.conf
  $ chmod 640 /etc/nova/nova.conf

Replace my_ip in nova.conf with the Compute node's IP

  $ sed -i 's/my_ip=.*/my_ip=192.168.1.137/g' /etc/nova/nova.conf

Enable/Start Nova Compute service

  $ for i in start enable status; \
    do systemctl $i openstack-nova-compute; done

Finally, on the Controller :-

# ovs-vsctl show
2790327e-fde5-4f35-9c99-b1180353b29e
    Bridge br-int
        Port br-int
            Interface br-int
                type: internal
        Port "qr-f38eb3d5-20"
            tag: 1
            Interface "qr-f38eb3d5-20"
                type: internal
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port "tap5d1add26-f3"
            tag: 1
            Interface "tap5d1add26-f3"
                type: internal
    Bridge br-ex
        Port "p37p1"
            Interface "p37p1"
        Port br-ex
            Interface br-ex
                type: internal
        Port "qg-0dea8587-32"
            Interface "qg-0dea8587-32"
                type: internal
    Bridge br-tun
        Port "gre-2"
            Interface "gre-2"
                type: gre
                options: {in_key=flow, local_ip="192.168.1.130", out_key=flow, remote_ip="192.168.1.140"}
        Port br-tun
            Interface br-tun
                type: internal
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
    ovs_version: "2.0.0"

On Compute 

# ovs-vsctl show
b2e33386-ca7e-46e2-b97e-6bbf511727ac
    Bridge br-int
        Port br-int
            Interface br-int
                type: internal
        Port "qvo30c356f8-c0"
            tag: 1
            Interface "qvo30c356f8-c0"
        Port "qvoa5c6c346-78"
            tag: 1
            Interface "qvoa5c6c346-78"
        Port "qvo56bfcccb-86"
            tag: 1
            Interface "qvo56bfcccb-86"
        Port "qvo051565c4-dd"
            tag: 1
            Interface "qvo051565c4-dd"
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
    Bridge br-tun
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
        Port "gre-1"
            Interface "gre-1"
                type: gre
                options: {in_key=flow, local_ip="192.168.1.140", out_key=flow, remote_ip="192.168.1.130"}
        Port br-tun
            Interface br-tun
                type: internal
    ovs_version: "2.0.0"


Create Neutron networks on Controller node 
------------------------------------------

Find the tenant_id of services tenant

  $ keystone tenant-list | grep services | awk '{print $2;}'
  a04e8158c9974f2699185994791e78c1

Create an external network

  $ neutron net-create --tenant-id a04e8158c9974f2699185994791e78c1 \
    ext --router:external=True 
  Created a new network:
  +---------------------------+--------------------------------------+
  | Field                     | Value                                |
  +---------------------------+--------------------------------------+
  | admin_state_up            | True                                 |
  | id                        | 12e4de23-34f8-4f9f-ba2b-810c36f3cc40 |
  | name                      | ext                                  |
  | provider:network_type     | gre                                  |
  | provider:physical_network |                                      |
  | provider:segmentation_id  | 1                                    |
  | router:external           | True                                 |
  | shared                    | False                                |
  | status                    | ACTIVE                               |
  | subnets                   |                                      |
  | tenant_id                 | a04e8158c9974f2699185994791e78c1     |
  +---------------------------+--------------------------------------+

Create subnet for the 'ext' network

  $ neutron subnet-create --tenant-id a04e8158c9974f2699185994791e78c1 \
  ext 192.168.1.0/24 --enable_dhcp=False --allocation-pool \
  start=192.168.1.100,end=192.168.1.200 --gateway-ip \
  192.168.1.1
  Created a new subnet:
  +------------------+----------------------------------------------------+
  | Field            | Value                                              |
  +------------------+----------------------------------------------------+
  | allocation_pools | {"start": "192.168.1.100", "end": "192.168.1.200"} |
  | cidr             | 192.168.1.0/24                                     |
  | dns_nameservers  |                                                    |
  | enable_dhcp      | False                                              |
  | gateway_ip       | 192.168.1.1                                        |
  | host_routes      |                                                    |
  | id               | 2b84cf48-0db5-4e9f-b8f7-cef2a204f497               |
  | ip_version       | 4                                                  |
  | name             |                                                    |
  | network_id       | 12e4de23-34f8-4f9f-ba2b-810c36f3cc40               |
  | tenant_id        | a04e8158c9974f2699185994791e78c1                   |
  +------------------+----------------------------------------------------+

  $ neutron net-list
  +--------------------------------------+------+-----------------------------------------------------+
  | id                                   | name | subnets                                             |
  +--------------------------------------+------+-----------------------------------------------------+
  | 12e4de23-34f8-4f9f-ba2b-810c36f3cc40 | ext  | 2b84cf48-0db5-4e9f-b8f7-cef2a204f497 192.168.1.0/24 |
  +--------------------------------------+------+-----------------------------------------------------+
  
  $ neutron subnet-list
  +--------------------------------------+------+----------------+-----------------------------------------------------+
  | id                                   | name | cidr           | allocation_pools                                    |
  +--------------------------------------+------+----------------+-----------------------------------------------------+
  | 2b84cf48-0db5-4e9f-b8f7-cef2a204f497 |      | 192.168.1.0/24 | {"start": "192.168.1.100", "end": "192.168.1.200"} |
  +--------------------------------------+------+----------------+-----------------------------------------------------+
  
  $ neutron net-show ext
  +---------------------------+--------------------------------------+
  | Field                     | Value                                |
  +---------------------------+--------------------------------------+
  | admin_state_up            | True                                 |
  | id                        | 12e4de23-34f8-4f9f-ba2b-810c36f3cc40 |
  | name                      | ext                                  |
  | provider:network_type     | gre                                  |
  | provider:physical_network |                                      |
  | provider:segmentation_id  | 1                                    |
  | router:external           | True                                 |
  | shared                    | False                                |
  | status                    | ACTIVE                               |
  | subnets                   | 2b84cf48-0db5-4e9f-b8f7-cef2a204f497 |
  | tenant_id                 | a04e8158c9974f2699185994791e78c1     |
  +---------------------------+--------------------------------------+


Next, let's create an internal network under the tenant 'ostenant'.
Source that user's keystone credentials:

  $ . keystonerc_kashyap

Create a router attached to the external network. This router routes
traffic to the internal subnets

  $ neutron router-create router1
  Created a new router:
  +-----------------------+--------------------------------------+
  | Field                 | Value                                |
  +-----------------------+--------------------------------------+
  | admin_state_up        | True                                 |
  | external_gateway_info |                                      |
  | id                    | d72adddf-4c02-4916-ae6d-16bfdaf59d99 |
  | name                  | router1                              |
  | status                | ACTIVE                               |
  | tenant_id             | 2c845a6ad20e45ccb0b045cee27a9661     |
  +-----------------------+--------------------------------------+

Connect the router to ext by setting the gateway for the router as ext

  $ neutron router-gateway-set router1 ext
  Set gateway for router router1

Create an internal network and its associated subnet

  $ neutron net-create int
  Created a new network:
  +----------------+--------------------------------------+
  | Field          | Value                                |
  +----------------+--------------------------------------+
  | admin_state_up | True                                 |
  | id             | 1a4157a6-5cf2-46e3-bdea-1533c8f54cdf |
  | name           | int                                  |
  | shared         | False                                |
  | status         | ACTIVE                               |
  | subnets        |                                      |
  | tenant_id      | 2c845a6ad20e45ccb0b045cee27a9661     |
  +----------------+--------------------------------------+

  $ neutron subnet-create int 30.0.0.0/24 --dns_nameservers list=true \
    192.168.122.1
  Created a new subnet:
  +------------------+--------------------------------------------+
  | Field            | Value                                      |
  +------------------+--------------------------------------------+
  | allocation_pools | {"start": "30.0.0.2", "end": "30.0.0.254"} |
  | cidr             | 30.0.0.0/24                                |
  | dns_nameservers  | 192.168.122.1                              |
  | enable_dhcp      | True                                       |
  | gateway_ip       | 30.0.0.1                                   |
  | host_routes      |                                            |
  | id               | 04b95f13-cc77-4f9a-8206-7f6ed183461d       |
  | ip_version       | 4                                          |
  | name             |                                            |
  | network_id       | 1a4157a6-5cf2-46e3-bdea-1533c8f54cdf       |
  | tenant_id        | 2c845a6ad20e45ccb0b045cee27a9661           |
  +------------------+--------------------------------------------+

Connect the above subnet to the router by adding it as an interface

  $ neutron router-interface-add router1 \
  04b95f13-cc77-4f9a-8206-7f6ed183461d
  Added interface 63ea2815-b524-4a12-931d-3e7db60ea170 to router router1.
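
The new interface shows up as a port on the router:

  $ neutron router-port-list router1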


List subnets (as keystonerc_kashyap)

  $ neutron subnet-list
  +--------------------------------------+------+-------------+--------------------------------------------+
  | id                                   | name | cidr        | allocation_pools                           |
  +--------------------------------------+------+-------------+--------------------------------------------+
  | 04b95f13-cc77-4f9a-8206-7f6ed183461d |      | 30.0.0.0/24 | {"start": "30.0.0.2", "end": "30.0.0.254"} |
  +--------------------------------------+------+-------------+--------------------------------------------+


Boot an instance and attach Floating IP
---------------------------------------

  $ nova keypair-add oskey1 > oskey1.priv
  $ chmod 600 oskey1.priv

  $ glance image-list
  +--------------------------------------+--------+-------------+------------------+---------+--------+
  | ID                                   | Name   | Disk Format | Container Format | Size    | Status |
  +--------------------------------------+--------+-------------+------------------+---------+--------+
  | fa7a83d1-3ddb-4c0e-9c07-839b6b00f8ca | cirros | qcow2       | bare             | 9761280 | active |
  +--------------------------------------+--------+-------------+------------------+---------+--------+

  $ nova boot --flavor 2 --key_name oskey1 --image \
    fa7a83d1-3ddb-4c0e-9c07-839b6b00f8ca cirr-guest2

  $ nova list
  +--------------------------------------+-------------+--------+------------+-------------+--------------+
  | ID                                   | Name        | Status | Task State | Power State | Networks     |
  +--------------------------------------+-------------+--------+------------+-------------+--------------+
  | acfbd460-d8d7-4eba-962b-de6f2f50db12 | cirr-guest2 | ACTIVE | None       | Running     | int=30.0.0.2 |
  +--------------------------------------+-------------+--------+------------+-------------+--------------+
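
If the instance fails to reach ACTIVE, the console log usually
says why:

  $ nova console-log cirr-guest2 | tail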


Create and allocate Floating IP addresses
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Source the user's credentials (tenant will be 'ostenant')

  $ . keystonerc_kashyap

List the ports 

 $ neutron port-list --device-id acfbd460-d8d7-4eba-962b-de6f2f50db12
 +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
 | id                                   | name | mac_address       | fixed_ips                                                                       |
 +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
 | 4ecfe633-dbab-4660-a51c-78ea6dbcc149 |      | fa:16:3e:88:05:e0 | {"subnet_id": "04b95f13-cc77-4f9a-8206-7f6ed183461d", "ip_address": "30.0.0.2"} |
 +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+

Create a floating IP

  $ neutron floatingip-create ext
  Created a new floatingip:
  +---------------------+--------------------------------------+
  | Field               | Value                                |
  +---------------------+--------------------------------------+
  | fixed_ip_address    |                                      |
  | floating_ip_address | 192.168.122.11                       |
  | floating_network_id | 12e4de23-34f8-4f9f-ba2b-810c36f3cc40 |
  | id                  | 5976f269-bca2-4c44-8860-7d493909568f |
  | port_id             |                                      |
  | router_id           |                                      |
  | tenant_id           | 2c845a6ad20e45ccb0b045cee27a9661     |
  +---------------------+--------------------------------------+

  Note that (as 'keystone tenant-list' confirms) the above tenant_id
  corresponds to 'ostenant'.

List the floating IP

  $ neutron floatingip-list
  +--------------------------------------+------------------+---------------------+---------+
  | id                                   | fixed_ip_address | floating_ip_address | port_id |
  +--------------------------------------+------------------+---------------------+---------+
  | 5976f269-bca2-4c44-8860-7d493909568f |                  | 192.168.122.11      |         |
  +--------------------------------------+------------------+---------------------+---------+

Associate floating IP with the port of the VM

  $ neutron floatingip-associate 5976f269-bca2-4c44-8860-7d493909568f 4ecfe633-dbab-4660-a51c-78ea6dbcc149
  Associated floatingip 5976f269-bca2-4c44-8860-7d493909568f

Show the floating IP

  $ neutron floatingip-show 5976f269-bca2-4c44-8860-7d493909568f
  +---------------------+--------------------------------------+
  | Field               | Value                                |
  +---------------------+--------------------------------------+
  | fixed_ip_address    | 30.0.0.2                             |
  | floating_ip_address | 192.168.122.11                       |
  | floating_network_id | 12e4de23-34f8-4f9f-ba2b-810c36f3cc40 |
  | id                  | 5976f269-bca2-4c44-8860-7d493909568f |
  | port_id             | 4ecfe633-dbab-4660-a51c-78ea6dbcc149 |
  | router_id           | d72adddf-4c02-4916-ae6d-16bfdaf59d99 |
  | tenant_id           | 2c845a6ad20e45ccb0b045cee27a9661     |
  +---------------------+--------------------------------------+

List the running nova guest

  $ nova list
  +--------------------------------------+-------------+--------+------------+-------------+------------------------------+
  | ID                                   | Name        | Status | Task State | Power State | Networks                     |
  +--------------------------------------+-------------+--------+------------+-------------+------------------------------+
  | acfbd460-d8d7-4eba-962b-de6f2f50db12 | cirr-guest2 | ACTIVE | None       | Running     | int=30.0.0.2, 192.168.122.11 |
  +--------------------------------------+-------------+--------+------------+-------------+------------------------------+


Add the security rules
----------------------

  $ neutron security-group-rule-create --protocol icmp \
    --direction ingress --remote-ip-prefix 0.0.0.0/0 default

  $ neutron security-group-rule-create --protocol tcp \
    --port-range-min 22 --port-range-max 22 \
    --direction ingress --remote-ip-prefix 0.0.0.0/0 default
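
With ICMP and SSH open, the guest should now be reachable via its
floating IP ('cirros' is the image's default login user):

  $ ping -c 3 192.168.122.11
  $ ssh -i oskey1.priv cirros@192.168.122.11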


Tunneling to get VNC access to the guest
----------------------------------------

After configuring VNC, set up an ssh tunnel to the compute host (which
runs on bare metal):

  $ ssh root@baremetal -L 5901:192.168.1.137:5901

Connect to the nova guest via VNC

  $ vncviewer localhost:5901


Networking Diagnostics
----------------------

List the namespaces:

  $ ip netns list
  qrouter-d72adddf-4c02-4916-ae6d-16bfdaf59d99
  qdhcp-1a4157a6-5cf2-46e3-bdea-1533c8f54cdf

List interfaces inside the router namespace:

  $ ip netns exec qrouter-d72adddf-4c02-4916-ae6d-16bfdaf59d99 ip a
  1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default 
      link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
      inet 127.0.0.1/8 scope host lo
         valid_lft forever preferred_lft forever
      inet6 ::1/128 scope host 
         valid_lft forever preferred_lft forever
  3: qr-63ea2815-b5: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
      link/ether fa:16:3e:c7:1b:a3 brd ff:ff:ff:ff:ff:ff
      inet 30.0.0.1/24 brd 30.0.0.255 scope global qr-63ea2815-b5
         valid_lft forever preferred_lft forever
      inet6 fe80::f816:3eff:fec7:1ba3/64 scope link 
         valid_lft forever preferred_lft forever
  4: qg-e7110dba-a9: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
      link/ether fa:16:3e:2b:bc:5d brd ff:ff:ff:ff:ff:ff
      inet 192.168.122.10/24 brd 192.168.122.255 scope global qg-e7110dba-a9
         valid_lft forever preferred_lft forever
      inet 192.168.122.11/32 brd 192.168.122.11 scope global qg-e7110dba-a9
         valid_lft forever preferred_lft forever
      inet6 fe80::f816:3eff:fe2b:bc5d/64 scope link 
         valid_lft forever preferred_lft forever

List interfaces inside the dhcp namespace:

  $ ip netns exec qdhcp-1a4157a6-5cf2-46e3-bdea-1533c8f54cdf ip a
  1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default 
      link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
      inet 127.0.0.1/8 scope host lo
         valid_lft forever preferred_lft forever
      inet6 ::1/128 scope host 
         valid_lft forever preferred_lft forever
  2: ns-ea8b99ea-1e: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
      link/ether fa:16:3e:1f:03:4d brd ff:ff:ff:ff:ff:ff
      inet 30.0.0.3/24 brd 30.0.0.255 scope global ns-ea8b99ea-1e
         valid_lft forever preferred_lft forever
      inet6 fe80::f816:3eff:fe1f:34d/64 scope link 
         valid_lft forever preferred_lft forever
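
From inside the dhcp namespace, the guest should be reachable on the
tenant network:

  $ ip netns exec qdhcp-1a4157a6-5cf2-46e3-bdea-1533c8f54cdf \
    ping -c 2 30.0.0.2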

Check routing table inside the router namespace:

  $ ip netns exec qrouter-d72adddf-4c02-4916-ae6d-16bfdaf59d99 ip r
  default via 192.168.122.1 dev qg-e7110dba-a9 
  30.0.0.0/24 dev qr-63ea2815-b5  proto kernel  scope link  src 30.0.0.1 
  192.168.122.0/24 dev qg-e7110dba-a9  proto kernel  scope link  src 192.168.122.10 


  
Check the NAT rules in the cloud controller's router namespace; traffic
to 169.254.169.254 on port 80 should be redirected to the host on port 8700
  
  $ ip netns exec qrouter-d72adddf-4c02-4916-ae6d-16bfdaf59d99 \
    iptables -L -t nat | grep 169
  REDIRECT   tcp  --  anywhere             169.254.169.254      tcp dpt:http redir ports 8700