Sunday, October 25, 2015

Quick VRRP verification on RDO Liberty (CentOS 7.1)

 The sample below demonstrates uninterrupted access, provided via an HA Neutron router, to a cloud VM running on the second Compute node, while the Controller and the (L3-router-enabled) first Compute node swap MASTER and BACKUP roles as members of a keepalived pair.
Convert the DVR configuration built in the "RDO Liberty DVR Neutron workflow on CentOS 7.1" post in the same way as it was done in ([ 1 ]).

Setup configuration
- Controller node: Nova, Keystone, Cinder, Glance, 

   Neutron (using Open vSwitch plugin && VXLAN )
- (2x) Compute node: Nova (nova-compute),
         Neutron (openvswitch-agent,l3-agent,metadata-agent )
*****************************************************
On Controller and first Compute Node
*****************************************************
# yum install keepalived
*************************************************************************
Stop and disable neutron-l3-agent on Second Compute Node
Update /etc/neutron/neutron.conf as follows
**************************************************************************
[DEFAULT]
 router_distributed = False
 l3_ha = True
 max_l3_agents_per_router = 2

*****************************************************************
Switch agent_mode to legacy on all nodes
Update /etc/neutron/plugins/ml2/openvswitch_agent.ini
*****************************************************************
[agent]
enable_distributed_routing = False

All nodes restart

************************************************************
Create an HA router belonging to tenant demo
*************************************************************
[root@ip-192-169-142-127 ~(keystone_admin)]# python
Python 2.7.5 (default, Jun 24 2015, 00:41:19)
[GCC 4.8.3 20140911 (Red Hat 4.8.3-9)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> from keystoneclient.v2_0 import client
>>> token = '3ad2de159f9649afb0c342ba57e637d9'
>>> endpoint = 'http://192.169.142.127:35357/v2.0'
>>> keystone = client.Client(token=token, endpoint=endpoint)
>>> keystone.tenants.list()
[<Tenant {u'enabled': True, u'description': u'default tenant', u'name': u'demo', u'id': u'0d166b0ff5fb40a2bf6453e81b27962e'}>, <Tenant {u'enabled': True, u'description': u'admin tenant', u'name': u'admin', u'id': u'21e6a247384f4208a70983d852562cc7'}>, <Tenant {u'enabled': True, u'description': u'Tenant for the openstack services', u'name': u'services', u'id': u'ea97cf808f664f7f8d8810ab164de9ec'}>]
>>>

# neutron router-create --ha True --tenant_id 0d166b0ff5fb40a2bf6453e81b27962e RouterHA

[root@ip-192-169-142-127 ~(keystone_admin)]# neutron router-list
+-----------------------------------------------------------------------------------------------------------------------
| id | distributed | ha   |
+-----------------------------------------------------------------------------------------------------------------------3d4a0d41-5838-49bd-b691-ecc9946d6e19 | RouterHA | {"network_id": "1b202547-e1de-4c35-86a9-3119d6844f88", "enable_snat": true, "external_fixed_ips": [{"subnet_id": "e6473e85-5a4c-4eea-a42b-3a63def678c5", "ip_address": "192.169.142.159"}]} | False       | True |


[root@ip-192-169-142-127 ~(keystone_admin)]# neutron router-show RouterHA
+-----------------------------------------------------------------------------------------------------------------------
| Field | Value|
+-----------------------------------------------------------------------------------------------------------------------
| admin_state_up| True  |
| distributed       | False  |
| external_gateway_info | {"network_id": "1b202547-e1de-4c35-86a9-3119d6844f88", "enable_snat": true, "external_fixed_ips": [{"subnet_id": "e6473e85-5a4c-4eea-a42b-3a63def678c5", "ip_address": "192.169.142.159"}]} |
| ha | True |
| id | 3d4a0d41-5838-49bd-b691-ecc9946d6e19 |
| name | RouterHA |
| routes|                 |
| status | ACTIVE |
| tenant_id | 0d166b0ff5fb40a2bf6453e81b27962e |
+-----------------------------------------------------------------------------------------------------------------------

Attach the public && private networks to RouterHA


# neutron net-list

 +--------------------------------------+----------------------------------------------------+-------------------------------------------------------+
| id                                   | name                                               | subnets                                               |
+--------------------------------------+----------------------------------------------------+-------------------------------------------------------+
| 1b202547-e1de-4c35-86a9-3119d6844f88 | public               |
 e6473e85-5a4c-4eea-a42b-3a63def678c5 192.169.142.0/24 |
| 596eb520-da47-41a7-bfc1-8ace58d7ee98 | HA network tenant 0d166b0ff5fb40a2bf6453e81b27962e | c7d12fde-47f4-4744-bc88-78a4a7e91755 169.254.192.0/18 |
| 267c9192-29e2-41e2-8db4-826a6155dec9 | demo_network                                       | 89704ab3-5535-4c87-800e-39255a0a11d9 50.0.0.0/24      |
+--------------------------------------+----------------------------------------------------+-------------------------------------------------------+

# neutron router-port-list RouterHA

[root@ip-192-169-142-127 ~(keystone_admin)]# neutron router-port-list RouterHA
+--------------------------------------+-------------------------------------------------+-------------------+----------------------------------------------------------------------------------------+
| id                                   | name                                            | mac_address       | fixed_ips                                                                              |
+--------------------------------------+-------------------------------------------------+-------------------+----------------------------------------------------------------------------------------+
| 0a823561-8ce6-4c7d-8943-525e74f61210 |                                                 | fa:16:3e:14:ca:12 | {"subnet_id": "e6473e85-5a4c-4eea-a42b-3a63def678c5", "ip_address": "192.169.142.159"} |
| 1981fd35-3025-45ff-a6e5-ab5bc7d8af3e | HA port tenant 0d166b0ff5fb40a2bf6453e81b27962e | fa:16:3e:b8:d6:14 | {"subnet_id": "c7d12fde-47f4-4744-bc88-78a4a7e91755", "ip_address": "169.254.192.2"}   |
| 4b4ac14c-a3a9-4fc0-9c3a-36d0ae1f4b11 | HA port tenant 0d166b0ff5fb40a2bf6453e81b27962e | fa:16:3e:c5:b2:4b | {"subnet_id": "c7d12fde-47f4-4744-bc88-78a4a7e91755", "ip_address": "169.254.192.1"}   |

| 6d989cb9-dfc8-4e08-8629-3c1186268511 |                                                 | fa:16:3e:cf:e2:a0 | {"subnet_id": "89704ab3-5535-4c87-800e-39255a0a11d9", "ip_address": "50.0.0.1"}        |
+--------------------------------------+-------------------------------------------------+-------------------+----------------------------------------------------------------------------------------+

***************************************************************************
Start up configuration. Compute Node 1 is in MASTER STATE
***************************************************************************
Pinging running VM (FIP is 192.169.142.153 ) 

[boris@fedora21wks01 Downloads]$ ping 192.169.142.153
PING 192.169.142.153 (192.169.142.153) 56(84) bytes of data.
64 bytes from 192.169.142.153: icmp_seq=2 ttl=63 time=0.608 ms
64 bytes from 192.169.142.153: icmp_seq=3 ttl=63 time=0.402 ms
64 bytes from 192.169.142.153: icmp_seq=4 ttl=63 time=0.452 ms



  *************************************************************************
  Compute Node 1 shutdown . Controller went to MASTER STATE
  *************************************************************************
  Pinging running VM (FIP is 192.169.142.153 ) 

  [boris@fedora21wks01 Downloads]$ ping 192.169.142.153
  PING 192.169.142.153 (192.169.142.153) 56(84) bytes of data.
  64 bytes from 192.169.142.153: icmp_seq=10 ttl=63 time=0.568 ms
  64 bytes from 192.169.142.153: icmp_seq=12 ttl=63 time=0.724 ms
  64 bytes from 192.169.142.153: icmp_seq=13 ttl=63 time=0.448 ms


   [root@ip-192-169-142-127 ~(keystone_admin)]# ip netns exec qrouter-3d4a0d41-5838-49bd-b691-ecc9946d6e19 ip a |grep "inet "
    inet 127.0.0.1/8 scope host lo
    inet 169.254.192.2/18 brd 169.254.255.255 scope global ha-1981fd35-30
    inet 169.254.0.1/24 scope global ha-1981fd35-30
    inet 50.0.0.1/24 scope global qr-6d989cb9-df
    inet 192.169.142.153/32 scope global qg-0a823561-8c
    inet 192.169.142.159/24 scope global qg-0a823561-8c

   *******************************************
   Compute Node 1 brought up again
   *******************************************
  
   *******************************************************************
   Controller (192.169.142.127)  has been rebooted
   *******************************************************************
  


    **************************************************************************
    Now Compute Node 1 goes to  MASTER  STATE again 
    **************************************************************************
   [root@ip-192-169-142-147 ~]# systemctl restart  neutron-l3-agent

  [boris@fedora21wks01 Downloads]$ ping 192.169.142.153
  PING 192.169.142.153 (192.169.142.153) 56(84) bytes of data.
  64 bytes from 192.169.142.153: icmp_seq=22 ttl=63 time=0.640 ms
  64 bytes from 192.169.142.153: icmp_seq=23 ttl=63 time=0.553 ms
  64 bytes from 192.169.142.153: icmp_seq=24 ttl=63 time=0.516 ms
 

On Controller :-

[root@ip-192-169-142-127 ~(keystone_admin)]# ip netns exec qrouter-3d4a0d41-5838-49bd-b691-ecc9946d6e19 ip a |grep "inet "
    inet 127.0.0.1/8 scope host lo
    inet 169.254.192.2/18 brd 169.254.255.255 scope global ha-1981fd35-30
[root@ip-192-169-142-127 ~(keystone_admin)]# ssh 192.169.142.147
Last login: Sun Oct 25 12:50:43 2015

On Compute :-

[root@ip-192-169-142-147 ~]# ip netns exec qrouter-3d4a0d41-5838-49bd-b691-ecc9946d6e19 ip a |grep "inet "
    inet 127.0.0.1/8 scope host lo
    inet 169.254.192.1/18 brd 169.254.255.255 scope global ha-4b4ac14c-a3
    inet 169.254.0.1/24 scope global ha-4b4ac14c-a3
    inet 50.0.0.1/24 scope global qr-6d989cb9-df
    inet 192.169.142.153/32 scope global qg-0a823561-8c
    inet 192.169.142.159/24 scope global qg-0a823561-8c

*********************************
Keepalived status :-
*********************************


   Generated keepalived.conf

  

No comments:

Post a Comment