I believe that a similar procedure would work for RDO Newton once its conversion to a routable "ctlplane" is completed. To avoid hitting the "OSDs fewer than Replicas" problem, the corresponding tripleo-heat-templates file is located and updated before running the overcloud deployment. In the meantime, "ctlplane" has been converted to 192.168.24.0/24 and br-ctlplane has the IP 192.168.24.1 on both Mitaka and Newton. The changes below were obviously made only due to limited hardware, e.g. a 4-core CPU.
***************************************************************
Hack before running overcloud-deploy.sh
First, find the template(s) containing the entry "osd_pool_default_size"
***************************************************************
[root@undercloud openstack-tripleo-heat-templates]# cat search.sh
#!/bin/bash
# Print each ceph*.yaml template under the current directory and any
# "osd_pool_default_size" line it contains
for W in `find . -name "ceph*.yaml" -print`
do
  echo $W
  grep "osd_pool_default_size" $W
done
***********************
Current directory
***********************
/usr/share/openstack-tripleo-heat-templates
[root@undercloud openstack-tripleo-heat-templates]# ./search.sh
./network/config/bond-with-vlans/ceph-storage.yaml
./network/config/multiple-nics/ceph-storage.yaml
./network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
./network/config/single-nic-vlans/ceph-storage.yaml
./puppet/ceph-cluster-config.yaml
./puppet/ceph-storage-post.yaml
./puppet/ceph-storage.yaml
./puppet/extraconfig/ceph/ceph-external-config.yaml
./puppet/hieradata/ceph.yaml
ceph::profile::params::osd_pool_default_size: 3
*****************************************************************************************
Update /usr/share/openstack-tripleo-heat-templates/puppet/hieradata/ceph.yaml
*****************************************************************************************
ceph::profile::params::osd_pool_default_size: 2
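For reference, the same edit can be scripted from the undercloud; a minimal sketch (my own, not from the original run), assuming the stock template still carries the default value of 3:

# change the default pool size from 3 to 2 in place, keeping a backup of the stock template
sed -i.orig 's/osd_pool_default_size: 3/osd_pool_default_size: 2/' \
    /usr/share/openstack-tripleo-heat-templates/puppet/hieradata/ceph.yaml
# verify the change
grep osd_pool_default_size /usr/share/openstack-tripleo-heat-templates/puppet/hieradata/ceph.yaml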
******************
Create file
******************
[stack@undercloud ~]$ cat $HOME/network_env.yaml
{
    "parameter_defaults": {
        "ControlPlaneDefaultRoute": "192.168.24.1",
        "ControlPlaneSubnetCidr": "24",
        "DnsServers": [
            "192.168.23.27"
        ],
        "EC2MetadataIp": "192.168.24.1",
        "ExternalAllocationPools": [
            {
                "end": "10.0.0.250",
                "start": "10.0.0.4"
            }
        ],
        "ExternalNetCidr": "10.0.0.1/24",
        "NeutronExternalNetworkBridge": ""
    }
}
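As a quick sanity check (my own addition, not part of the original workflow), the file can be verified to parse cleanly before it is handed to the deploy command; valid JSON is also valid YAML, which Heat expects:

python -c 'import json; json.load(open("/home/stack/network_env.yaml")); print "network_env.yaml parses OK"'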
************************
Run deployment
************************
#!/bin/bash -x
source /home/stack/stackrc
openstack overcloud deploy \
  --control-scale 3 --compute-scale 1 --ceph-storage-scale 2 \
  --libvirt-type qemu \
  --ntp-server pool.ntp.org \
  --templates /usr/share/openstack-tripleo-heat-templates \
  -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml \
  -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml \
  -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml \
  -e /usr/share/openstack-tripleo-heat-templates/environments/net-single-nic-with-vlans.yaml \
  -e $HOME/network_env.yaml
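While the deployment runs, progress can be followed from a second undercloud shell; a small sketch (not part of the original script):

source /home/stack/stackrc
heat stack-list                                  # overall status: CREATE_IN_PROGRESS / CREATE_COMPLETE / CREATE_FAILED
heat resource-list overcloud | grep -v COMPLETE  # resources still in progress or failed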
**************************
Deployment completed
**************************
[stack@undercloud ~]$ nova list
+--------------------------------------+-------------------------+--------+------------+-------------+------------------------+
| ID                                   | Name                    | Status | Task State | Power State | Networks               |
+--------------------------------------+-------------------------+--------+------------+-------------+------------------------+
| 028d32a4-6b23-4ed6-9278-cab365378a0e | overcloud-cephstorage-0 | ACTIVE | -          | Running     | ctlplane=192.168.24.14 |
| 4be9eb02-ce0c-475b-aa44-9560781c2c91 | overcloud-cephstorage-1 | ACTIVE | -          | Running     | ctlplane=192.168.24.15 |
| 7b7dcff4-4bb1-4769-85b5-7e4a69a0cd6b | overcloud-controller-0  | ACTIVE | -          | Running     | ctlplane=192.168.24.18 |
| 0a837944-c10e-44c2-a382-04bfcf16b48d | overcloud-controller-1  | ACTIVE | -          | Running     | ctlplane=192.168.24.17 |
| 9a17daf1-ec7f-4220-ba9a-40121a21c997 | overcloud-controller-2  | ACTIVE | -          | Running     | ctlplane=192.168.24.19 |
| 17ebe9c9-2241-428c-88c1-56beba838662 | overcloud-novacompute-0 | ACTIVE | -          | Running     | ctlplane=192.168.24.16 |
+--------------------------------------+-------------------------+--------+------------+-------------+------------------------+
[root@overcloud-controller-1 ~]# ceph status
cluster a24eec1a-a1bf-11e6-bdb7-00d38065a6b2
health HEALTH_WARN
clock skew detected on mon.overcloud-controller-1, mon.overcloud-controller-0
Monitor clock skew detected
monmap e1: 3 mons at {overcloud-controller-0=172.16.1.9:6789/0,overcloud-controller-1=172.16.1.8:6789/0,overcloud-controller-2=172.16.1.7:6789/0}
election epoch 6, quorum 0,1,2 overcloud-controller-2,overcloud-controller-1,overcloud-controller-0
osdmap e13: 2 osds: 2 up, 2 in
pgmap v20: 192 pgs, 5 pools, 0 bytes data, 0 objects
17242 MB used, 85133 MB / 102375 MB avail
192 active+clean
[root@overcloud-controller-1 ~]# ceph health detail --format=json-pretty
{
"health": {
"health_services": [
{
"mons": [
{
"name": "overcloud-controller-2",
"kb_total": 52416312,
"kb_used": 8376616,
"kb_avail": 44039696,
"avail_percent": 84,
"last_updated": "2016-11-03 13:16:01.998578",
"store_stats": {
"bytes_total": 4195635,
"bytes_sst": 949,
"bytes_log": 4128768,
"bytes_misc": 65918,
"last_updated": "0.000000"
},
"health": "HEALTH_OK"
},
{
"name": "overcloud-controller-1",
"kb_total": 52416312,
"kb_used": 8377052,
"kb_avail": 44039260,
"avail_percent": 84,
"last_updated": "2016-11-03 13:15:57.391923",
"store_stats": {
"bytes_total": 6292787,
"bytes_sst": 949,
"bytes_log": 6225920,
"bytes_misc": 65918,
"last_updated": "0.000000"
},
"health": "HEALTH_OK"
},
{
"name": "overcloud-controller-0",
"kb_total": 52416312,
"kb_used": 8376728,
"kb_avail": 44039584,
"avail_percent": 84,
"last_updated": "2016-11-03 13:16:00.579998",
"store_stats": {
"bytes_total": 6292787,
"bytes_sst": 949,
"bytes_log": 6225920,
"bytes_misc": 65918,
"last_updated": "0.000000"
},
"health": "HEALTH_OK"
}
]
}
]
},
"timechecks": {
"epoch": 6,
"round": 14,
"round_status": "finished",
"mons": [
{
"name": "overcloud-controller-2",
"skew": 0.000000,
"latency": 0.000000,
"health": "HEALTH_OK"
},
{
"name": "overcloud-controller-1",
"skew": 0.314601,
"latency": 0.004384,
"health": "HEALTH_WARN",
"details": "clock skew 0.314601s > max 0.05s"
},
{
"name": "overcloud-controller-0",
"skew": 0.974456,
"latency": 0.004401,
"health": "HEALTH_WARN",
"details": "clock skew 0.974456s > max 0.05s"
}
]
},
"summary": [
{
"severity": "HEALTH_WARN",
"summary": "Monitor clock skew detected "
}
],
"overall_status": "HEALTH_WARN",
"detail": [
"mon.overcloud-controller-1 addr 172.16.1.8:6789\/0 clock skew 0.314601s > max 0.05s (latency 0.00438388s)",
"mon.overcloud-controller-0 addr 172.16.1.9:6789\/0 clock skew 0.974456s > max 0.05s (latency 0.00440124s)"
]
}
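The clock skew warning itself can usually be cleared by forcing a time re-sync on the affected controllers; a sketch, assuming an NTP source that the controllers can still reach (by IP address if DNS is unavailable, see the note further below):

# run on overcloud-controller-0 and overcloud-controller-1
systemctl stop ntpd
ntpdate <reachable-ntp-server>   # placeholder for an NTP server reachable from the controllers
systemctl start ntpd
# then re-check from any controller
ceph health detail | grep skew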
[root@overcloud-controller-0 ~]# pcs status
Cluster name: tripleo_cluster
Last updated: Thu Nov 3 15:46:26 2016 Last change: Thu Nov 3 12:58:12 2016 by root via cibadmin on overcloud-controller-1
Stack: corosync
Current DC: overcloud-controller-2 (version 1.1.13-10.el7_2.4-44eb2dd) - partition with quorum
3 nodes and 127 resources configured
Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Full list of resources:
ip-172.16.2.5 (ocf::heartbeat:IPaddr2): Started overcloud-controller-0
ip-172.16.3.4 (ocf::heartbeat:IPaddr2): Started overcloud-controller-1
Clone Set: haproxy-clone [haproxy]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Master/Slave Set: galera-master [galera]
Masters: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: memcached-clone [memcached]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
ip-10.0.0.4 (ocf::heartbeat:IPaddr2): Started overcloud-controller-2
ip-172.16.2.4 (ocf::heartbeat:IPaddr2): Started overcloud-controller-0
ip-172.16.1.4 (ocf::heartbeat:IPaddr2): Started overcloud-controller-1
Clone Set: rabbitmq-clone [rabbitmq]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-core-clone [openstack-core]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Master/Slave Set: redis-master [redis]
Masters: [ overcloud-controller-0 ]
Slaves: [ overcloud-controller-1 overcloud-controller-2 ]
Clone Set: mongod-clone [mongod]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
ip-192.168.24.13 (ocf::heartbeat:IPaddr2): Started overcloud-controller-2
Clone Set: openstack-aodh-evaluator-clone [openstack-aodh-evaluator]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: neutron-l3-agent-clone [neutron-l3-agent]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
openstack-cinder-volume (systemd:openstack-cinder-volume): Started overcloud-controller-0
Clone Set: openstack-heat-engine-clone [openstack-heat-engine]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-aodh-listener-clone [openstack-aodh-listener]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-gnocchi-metricd-clone [openstack-gnocchi-metricd]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-heat-api-clone [openstack-heat-api]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-ceilometer-collector-clone [openstack-ceilometer-collector]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-glance-api-clone [openstack-glance-api]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-cinder-scheduler-clone [openstack-cinder-scheduler]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-nova-api-clone [openstack-nova-api]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-nova-consoleauth-clone [openstack-nova-consoleauth]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-sahara-api-clone [openstack-sahara-api]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-heat-api-cloudwatch-clone [openstack-heat-api-cloudwatch]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-sahara-engine-clone [openstack-sahara-engine]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-glance-registry-clone [openstack-glance-registry]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-gnocchi-statsd-clone [openstack-gnocchi-statsd]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-ceilometer-notification-clone [openstack-ceilometer-notification]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-cinder-api-clone [openstack-cinder-api]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: delay-clone [delay]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: neutron-server-clone [neutron-server]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-ceilometer-central-clone [openstack-ceilometer-central]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: httpd-clone [httpd]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-heat-api-cfn-clone [openstack-heat-api-cfn]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor]
Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
PCSD Status:
overcloud-controller-0: Online
overcloud-controller-1: Online
overcloud-controller-2: Online
Daemon Status:
corosync: active/enabled
pacemaker: active/enabled
pcsd: active/enabled
******************************************************************************************
DNS resolution appears to be lost after the conversion to a routable "ctlplane" on the HA controllers, presumably leaving pool.ntp.org unresolvable, hence the HEALTH_WARN (monitor clock skew) shown below. The corresponding qcow2 images were therefore scp'ed from the undercloud to controller-0.
******************************************************************************************
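For illustration, uploading one of those images on controller-0 might look like the sketch below; the qcow2 file name, the /tmp path, and the location of the credentials file are assumptions, only the image name CentOS72Cloud appears in the listings that follow:

# on the undercloud: push the image over the ctlplane network to controller-0
scp CentOS72Cloud.qcow2 heat-admin@192.168.24.18:/tmp/
# on controller-0: source the overcloud credentials (assumed to have been copied there as well)
# and upload the image into Glance, which stores it in the Ceph "images" pool
source ~/overcloudrc
glance image-create --name CentOS72Cloud --disk-format qcow2 \
      --container-format bare --file /tmp/CentOS72Cloud.qcow2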
[root@overcloud-controller-0 ~]# ceph status
cluster a24eec1a-a1bf-11e6-bdb7-00d38065a6b2
health HEALTH_WARN
clock skew detected on mon.overcloud-controller-1, mon.overcloud-controller-0
Monitor clock skew detected
monmap e1: 3 mons at {overcloud-controller-0=172.16.1.9:6789/0,overcloud-controller-1=172.16.1.8:6789/0,overcloud-controller-2=172.16.1.7:6789/0}
election epoch 6, quorum 0,1,2 overcloud-controller-2,overcloud-controller-1,overcloud-controller-0
osdmap e15: 2 osds: 2 up, 2 in
pgmap v994: 192 pgs, 5 pools, 7106 MB data, 1671 objects
31432 MB used, 70943 MB / 102375 MB avail
192 active+clean
client io 2255 kB/s rd, 3917 kB/s wr, 142 op/s
[root@overcloud-controller-0 ~]# ceph osd df tree
ID WEIGHT  REWEIGHT SIZE    USE    AVAIL  %USE  VAR  TYPE NAME
-1 0.09998        - 102375M 31444M 70931M 30.71 1.00 root default
-2 0.04999        -  51187M 15723M 35464M 30.72 1.00     host overcloud-cephstorage-1
 0 0.04999  1.00000  51187M 15723M 35464M 30.72 1.00         osd.0
-3 0.04999        -  51187M 15721M 35466M 30.71 1.00     host overcloud-cephstorage-0
 1 0.04999  1.00000  51187M 15721M 35466M 30.71 1.00         osd.1
             TOTAL  102375M 31444M 70931M 30.71
MIN/MAX VAR: 1.00/1.00 STDDEV: 0.00
[root@overcloud-controller-0 ~]# ceph osd df plain
ID WEIGHT REWEIGHT SIZE USE AVAIL %USE VAR
0 0.04999 1.00000 51187M 15723M 35464M 30.72 1.00
1 0.04999 1.00000 51187M 15721M 35466M 30.71 1.00
TOTAL 102375M 31444M 70931M 30.71
MIN/MAX VAR: 1.00/1.00 STDDEV: 0.00
[root@overcloud-controller-0 ~]# glance image-list
+--------------------------------------+---------------+
| ID                                   | Name          |
+--------------------------------------+---------------+
| 6c4ff4bc-0b9e-45ee-976c-569a2ffc5421 | CentOS72Cloud |
| 0b6ef368-e91e-4f6f-9e48-4e3ff737faf4 | VF24Cloud     |
+--------------------------------------+---------------+
[root@overcloud-controller-0 ~]# rbd -p images ls
0b6ef368-e91e-4f6f-9e48-4e3ff737faf4
6c4ff4bc-0b9e-45ee-976c-569a2ffc5421
[root@overcloud-controller-0 ~]# cinder list
+--------------------------------------+--------+----------------+------+-------------+----------+--------------------------------------+
| ID                                   | Status | Name           | Size | Volume Type | Bootable | Attached to                          |
+--------------------------------------+--------+----------------+------+-------------+----------+--------------------------------------+
| 87367f39-616c-492d-808a-06627fe2dc36 | in-use | vf24volume     | 10   | -           | true     | 20105a2d-1c21-4162-b2cf-1701497690f0 |
| cec9b469-6085-428d-95fc-36fec076580a | in-use | centos72volume | 12   | -           | true     | 9ff8b5a9-7485-4b59-9d30-d1259ad91f95 |
+--------------------------------------+--------+----------------+------+-------------+----------+--------------------------------------+
[root@overcloud-controller-0 ~]# rbd -p volumes ls
volume-87367f39-616c-492d-808a-06627fe2dc36
volume-cec9b469-6085-428d-95fc-36fec076580a
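To double-check that a Cinder volume really is backed by RBD, the underlying image can be inspected directly; a sketch using the first volume ID from the listing above:

rbd -p volumes info volume-87367f39-616c-492d-808a-06627fe2dc36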