# cpe-deployments/config/bundle_focal.yaml
# Copyright (c) 2017-2018 Canonical USA Inc. All rights reserved.
#
# Foundation HyperConverged
#
series: focal
variables:
# https://wiki.ubuntu.com/OpenStack/CloudArchive
  # packages for an LTS release arrive in the form of SRUs, so do not
  # use cloud:<pocket> for the LTS-default OpenStack version, as the
  # installation hooks will fail; use "distro" instead:
openstack-origin: &openstack-origin distro
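  # For reference, deploying a newer OpenStack release on an LTS base
  # would use the cloud archive form instead; a hypothetical example
  # (the pocket name must match the target release):
  # openstack-origin: &openstack-origin cloud:focal-victoria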
openstack-region: &openstack-region RegionOne
# !> Important <!
  # Tune this value for the API services: if they spawn too many
  # workers, you will get intermittent failures due to CPU overcommit.
worker-multiplier: &worker-multiplier 0.25
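  # As a rough worked example (the exact formula varies by charm): with
  # a multiplier of 0.25 on a 16-core host, an API service spawns about
  # 16 * 0.25 = 4 worker processes instead of the usual one per core.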
  # Number of MySQL connections in the environment. The charm default is
  # not enough for an environment of this size, so the bundle sets its
  # own value; there is hardly a case for going higher than this.
mysql-connections: &mysql-connections 4000
  # MySQL tuning level. The charm default is "safest"; this, however,
  # impacts performance. For spinning platters, consider setting it
  # to "fast".
mysql-tuning-level: &mysql-tuning-level safest
  # Configure RAM allocation parameters for nova. For hyperconverged
  # nodes, we need plenty of reserve for service containers, Ceph OSDs,
  # and swift-storage daemons: those processes allocate RAM not only
  # directly but also indirectly via pagecache, filesystem caches, and
  # system buffer usage. Adjust upwards for higher-density clouds, e.g.
  # a high OSD/host ratio or more than two service containers per host.
reserved-host-memory: &reserved-host-memory 512
#ram-allocation-ratio: &ram-allocation-ratio 0.999999 # XXX bug 1613839
ram-allocation-ratio: &ram-allocation-ratio 1.0 # now fixed
  # Changed from 4.0 (as used on the borehamwood 1 and adastral
  # environments) to 2.0 for the borehamwood 003 environment.
cpu-allocation-ratio: &cpu-allocation-ratio 2.0
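  # Worked example: with a ratio of 2.0, a host exposing 32 hardware
  # threads advertises 32 * 2.0 = 64 schedulable vCPUs to the scheduler.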
# This is Management network, unrelated to OpenStack and other applications
# OAM - Operations, Administration and Maintenance
oam-space: &oam-space oam
# This is OpenStack Admin network; for adminURL endpoints
admin-space: &admin-space oam
# This is OpenStack Public network; for publicURL endpoints
#public-space: &public-space external
public-space: &public-space oam
# This is OpenStack Internal network; for internalURL endpoints
internal-space: &internal-space oam
# CEPH configuration
# CEPH access network
ceph-public-space: &ceph-public-space ceph-access
# CEPH replication network
ceph-cluster-space: &ceph-cluster-space ceph-replica
overlay-space: &overlay-space overlay
# Workaround for 'only one default binding supported'
oam-space-constr: &oam-space-constr spaces=oam
ceph-access-constr: &ceph-access-constr spaces=ceph-access
combi-access-constr: &combi-access-constr spaces=ceph-access,oam
# CEPH OSD and journal devices; temporary workaround for #1674148
osd-devices: &osd-devices "/dev/sdb /dev/sdc"
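  # Further devices can be added to a running cluster later via the
  # ceph-osd charm's add-disk action; for example (verify the action
  # name and parameters with "juju actions ceph-osd"):
  # juju run-action ceph-osd/0 add-disk osd-devices=/dev/sdd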
customize-failure-domain: &customize-failure-domain True
  # Expected OSD count is the total number of OSD disks that will be part
  # of the Ceph cluster. Never set this higher, or much lower, than the
  # real number; 10-20% below the actual count is acceptable.
expected-osd-count: &expected-osd-count 12
expected-mon-count: &expected-mon-count 3
nagios-context: &nagios-context arif-nc01
# Various VIPs
aodh-vip: &aodh-vip "10.0.1.211"
cinder-vip: &cinder-vip "10.0.1.212"
dashboard-vip: &dashboard-vip "10.0.1.213"
glance-vip: &glance-vip "10.0.1.214"
heat-vip: &heat-vip "10.0.1.215"
keystone-vip: &keystone-vip "10.0.1.216"
# not required for mysql-innodb-cluster
#mysql-vip: &mysql-vip "10.0.1.217"
neutron-api-vip: &neutron-api-vip "10.0.1.218"
nova-cc-vip: &nova-cc-vip "10.0.1.219"
gnocchi-vip: &gnocchi-vip "10.0.1.220"
  contrail-api-vip: &contrail-api-vip "10.0.1.221"
vault-vip: &vault-vip "10.0.1.222"
placement-vip: &placement-vip "10.0.1.223"
# NTP configuration
ntp-source: &ntp-source "192.168.1.11"
# Add policy-routing to the external network
external-network-cidr: &external-network-cidr 192.168.1.0/24
external-network-gateway: &external-network-gateway 192.168.1.249
  # After the bundle has been deployed, log in to the Landscape server
  # and create an account. In the account settings, set the Registration
  # key, then configure landscape-client to use that registration key:
  # juju config landscape-client registration-key=$your_registration_key
  # Encryption at rest
  # Removed for the borehamwood 003 design, as all storage is shared and
  # distributed on Ceph.
# ephemeral-device: &ephemeral-device /dev/disk/by-dname/ephemeral
# DNS configuration
  # This configuration is for overlay networks. Usually the domain should
  # be set to something like "openstack.customername.lan." (note the
  # trailing dot), while the cidr is for PTR records, so in most cases 24
  # is just fine (16 is another option).
# dns-domain: &dns-domain "openstack.customername.lan."
# dns-cidr: &dns-cidr 24
  # The DNS server needs to be the same across the different charms to
  # avoid conflicts.
dns-servers: &dns-servers '192.168.1.13'
  # Mappings that provide connectivity to a physical network, used by
  # neutron-gateway and possibly neutron-openvswitch; therefore, do not
  # configure an IP address for this port in MAAS.
data-port: &data-port "br-data:ens9"
bridge-mappings: &bridge-mappings 'physnet1:br-data'
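  # Once deployed, physnet1 is the name provider networks map to; a
  # hypothetical example of creating the external network against it
  # ("ext-net" is an illustrative name):
  # openstack network create --external --provider-network-type flat \
  #   --provider-physical-network physnet1 ext-net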
machines:
# Baremetals
# Control Nodes
"100":
constraints: tags=control,asrock01
"101":
constraints: tags=control,asrock02
"102":
constraints: tags=control,asrock03
"103":
constraints: tags=compute,asrock04
"104":
constraints: tags=compute,asrock02
"105":
constraints: tags=compute,asrock03
# LMA Nodes
"200":
constraints: tags=control,asrock01
"201":
constraints: tags=control,asrock04
"202":
constraints: tags=control,asrock02
# Landscape Nodes
"300":
constraints: tags=compute,asrock04
series: bionic
"301":
constraints: tags=compute,asrock03
series: bionic
"302":
constraints: tags=compute,asrock01
series: bionic
# Contrail Nodes
"400":
constraints: tags=compute,asrock01
"401":
constraints: tags=compute,asrock02
"402":
constraints: tags=compute,asrock03
# hyper-converged nova/ceph Nodes
"1000":
constraints: tags=compute,asrock01
"1001":
constraints: tags=compute,asrock01
"1002":
constraints: tags=compute,asrock02
"1003":
constraints: tags=compute,asrock02
"1004":
constraints: tags=compute,asrock03
"1005":
constraints: tags=compute,asrock03
"1006":
constraints: tags=compute,asrock04
"1007":
constraints: tags=compute,asrock04
applications:
# HAcluster
hacluster-aodh:
charm: cs:hacluster
hacluster-cinder:
charm: cs:hacluster
hacluster-glance:
charm: cs:hacluster
hacluster-gnocchi:
charm: cs:hacluster
hacluster-horizon:
charm: cs:hacluster
hacluster-keystone:
charm: cs:hacluster
hacluster-neutron:
charm: cs:hacluster
hacluster-nova:
charm: cs:hacluster
hacluster-heat:
charm: cs:hacluster
  hacluster-vault:
    charm: cs:hacluster
  # hacluster-placement is required by the placement:ha relation below.
  hacluster-placement:
    charm: cs:hacluster
# CPU governor applications
sysconfig-compute:
charm: cs:sysconfig
options:
enable-iommu: false
governor: "performance"
enable-pti: true
update-grub: true
# sysconfig-storage:
# charm: cs:sysconfig
# options:
# enable-iommu: true
# governor: "performance"
# enable-pti: true
# update-grub: true
sysconfig-control:
charm: cs:sysconfig
options:
enable-iommu: true
governor: "performance"
enable-pti: true
update-grub: true
# bcache-tuning
#bcache-tuning:
# charm: cs:bcache-tuning
# Ceph
ceph-mon:
charm: cs:ceph-mon
num_units: 3
bindings:
"": *oam-space
public: *ceph-public-space
osd: *ceph-public-space
client: *ceph-public-space
admin: *ceph-public-space
cluster: *ceph-cluster-space
options:
expected-osd-count: *expected-osd-count
source: *openstack-origin
monitor-count: *expected-mon-count
customize-failure-domain: *customize-failure-domain
to:
- lxd:100
- lxd:101
- lxd:102
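  # Once the monitors have settled, quorum can be sanity-checked from
  # any mon unit, for example:
  # juju ssh ceph-mon/0 -- sudo ceph -s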
# ceph-mon2:
# charm: cs:ceph-mon
# num_units: 3
# bindings:
# "": *oam-space
# public: *ceph-public-space
# osd: *ceph-public-space
# client: *ceph-public-space
# admin: *ceph-public-space
# cluster: *ceph-cluster-space
# options:
# expected-osd-count: *expected-osd-count
# source: *openstack-origin
# monitor-count: *expected-mon-count
# customize-failure-domain: *customize-failure-domain
# to:
# - lxd:100
# - lxd:101
# - lxd:102
ceph-osd:
charm: cs:ceph-osd
num_units: 8
bindings:
"": *oam-space
public: *ceph-public-space
cluster: *ceph-cluster-space
secrets-storage: *internal-space
mon: *ceph-public-space
options:
osd-devices: *osd-devices
source: *openstack-origin
customize-failure-domain: *customize-failure-domain
autotune: false
aa-profile-mode: complain
bluestore: true
osd-encrypt: True
osd-encrypt-keymanager: vault
to:
- '1000'
- '1001'
- '1002'
- '1003'
- '1004'
- '1005'
- '1006'
- '1007'
# ceph-osd2:
# charm: cs:ceph-osd
# num_units: 6
# bindings:
# "": *oam-space
# public: *ceph-public-space
# cluster: *ceph-cluster-space
# secrets-storage: *internal-space
# mon: *ceph-public-space
# options:
# osd-devices: *osd-devices
# source: *openstack-origin
# customize-failure-domain: *customize-failure-domain
# autotune: false
# aa-profile-mode: complain
# bluestore: true
# osd-encrypt: True
# osd-encrypt-keymanager: vault
# to:
# - '1000'
# - '1001'
# - '1002'
# - '1003'
# - '1004'
# - '1005'
# OpenStack
aodh:
charm: cs:aodh
num_units: 3
bindings:
"": *oam-space
public: *public-space
admin: *admin-space
internal: *internal-space
shared-db: *internal-space
options:
worker-multiplier: *worker-multiplier
openstack-origin: *openstack-origin
region: *openstack-region
vip: *aodh-vip
use-internal-endpoints: True
to:
- lxd:200
- lxd:201
- lxd:202
gnocchi:
charm: cs:gnocchi
num_units: 3
bindings:
"": *oam-space
public: *public-space
admin: *admin-space
internal: *internal-space
shared-db: *internal-space
storage-ceph: *ceph-public-space
coordinator-memcached: *internal-space
options:
worker-multiplier: *worker-multiplier
openstack-origin: *openstack-origin
region: *openstack-region
vip: *gnocchi-vip
use-internal-endpoints: True
to:
- lxd:200
- lxd:201
- lxd:202
cinder:
charm: cs:cinder
num_units: 3
constraints: *combi-access-constr
bindings:
"": *oam-space
public: *public-space
admin: *admin-space
internal: *internal-space
shared-db: *internal-space
options:
worker-multiplier: *worker-multiplier
openstack-origin: *openstack-origin
block-device: None
glance-api-version: 2
vip: *cinder-vip
use-internal-endpoints: True
region: *openstack-region
enabled-services: "api,scheduler,volume"
to:
- lxd:100
- lxd:101
- lxd:102
# cinder2:
# charm: cs:cinder
# num_units: 3
# constraints: *combi-access-constr
# bindings:
# "": *oam-space
# public: *public-space
# admin: *admin-space
# internal: *internal-space
# shared-db: *internal-space
# options:
# worker-multiplier: *worker-multiplier
# openstack-origin: *openstack-origin
# block-device: None
# glance-api-version: 2
# vip: *cinder-vip
# use-internal-endpoints: True
# region: *openstack-region
# enabled-services: "backup"
# to:
# - as1-maas-node-07
# - as2-maas-node-07
# - as3-maas-node-07
# cinder3:
# charm: cs:cinder
# num_units: 3
# constraints: *combi-access-constr
# bindings:
# "": *oam-space
# public: *public-space
# admin: *admin-space
# internal: *internal-space
# shared-db: *internal-space
# options:
# worker-multiplier: *worker-multiplier
# openstack-origin: *openstack-origin
# block-device: None
# glance-api-version: 2
# vip: *cinder-vip
# use-internal-endpoints: True
# region: *openstack-region
# enabled-services: "backup"
# to:
# - lxd:200
# - lxd:201
# - lxd:202
# cinder-backup:
# charm: cs:cinder-backup
cinder-ceph:
charm: cs:cinder-ceph
options:
restrict-ceph-pools: False
glance:
charm: cs:glance
constraints: *combi-access-constr
bindings:
"": *oam-space
public: *public-space
admin: *admin-space
internal: *internal-space
shared-db: *internal-space
options:
worker-multiplier: *worker-multiplier
openstack-origin: *openstack-origin
vip: *glance-vip
use-internal-endpoints: True
restrict-ceph-pools: False
region: *openstack-region
num_units: 3
to:
- lxd:100
- lxd:101
- lxd:102
keystone:
charm: cs:keystone
num_units: 3
bindings:
"": *oam-space
public: *public-space
admin: *admin-space
internal: *internal-space
shared-db: *internal-space
options:
worker-multiplier: *worker-multiplier
openstack-origin: *openstack-origin
vip: *keystone-vip
region: *openstack-region
preferred-api-version: 3
token-provider: 'fernet'
to:
- lxd:103
- lxd:104
- lxd:105
mysql-innodb-cluster:
charm: cs:mysql-innodb-cluster
num_units: 3
bindings:
"": *oam-space
cluster: *internal-space
db-router: *internal-space
options:
source: *openstack-origin
#innodb-buffer-pool-size: 16G
wait-timeout: 3600
enable-binlogs: False
snapd_refresh: 'max'
max-connections: *mysql-connections
tuning-level: *mysql-tuning-level
to:
- lxd:100
- lxd:101
- lxd:102
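  # Cluster health can be checked after deployment via the charm's
  # cluster-status action (name per current charm revisions; verify
  # with "juju actions mysql-innodb-cluster"):
  # juju run-action --wait mysql-innodb-cluster/leader cluster-status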
aodh-mysql-router:
charm: cs:mysql-router
bindings:
"": *oam-space
shared-db: *internal-space
db-router: *internal-space
options:
source: *openstack-origin
keystone-mysql-router:
charm: cs:mysql-router
bindings:
"": *oam-space
shared-db: *internal-space
db-router: *internal-space
options:
source: *openstack-origin
cinder-mysql-router:
charm: cs:mysql-router
bindings:
"": *oam-space
shared-db: *internal-space
db-router: *internal-space
options:
source: *openstack-origin
glance-mysql-router:
charm: cs:mysql-router
bindings:
"": *oam-space
shared-db: *internal-space
db-router: *internal-space
options:
source: *openstack-origin
gnocchi-mysql-router:
charm: cs:mysql-router
bindings:
"": *oam-space
shared-db: *internal-space
db-router: *internal-space
options:
source: *openstack-origin
heat-mysql-router:
charm: cs:mysql-router
bindings:
"": *oam-space
shared-db: *internal-space
db-router: *internal-space
options:
source: *openstack-origin
nova-cloud-controller-mysql-router:
charm: cs:mysql-router
bindings:
"": *oam-space
shared-db: *internal-space
db-router: *internal-space
options:
source: *openstack-origin
neutron-api-mysql-router:
charm: cs:mysql-router
bindings:
"": *oam-space
shared-db: *internal-space
db-router: *internal-space
options:
source: *openstack-origin
openstack-dashboard-mysql-router:
charm: cs:mysql-router
bindings:
"": *oam-space
shared-db: *internal-space
db-router: *internal-space
options:
source: *openstack-origin
placement-mysql-router:
charm: cs:mysql-router
bindings:
"": *oam-space
shared-db: *internal-space
db-router: *internal-space
options:
source: *openstack-origin
vault-mysql-router:
charm: cs:mysql-router
bindings:
"": *oam-space
shared-db: *internal-space
db-router: *internal-space
options:
source: *openstack-origin
neutron-api:
charm: cs:neutron-api
num_units: 3
bindings:
"": *oam-space
public: *public-space
admin: *admin-space
internal: *internal-space
shared-db: *internal-space
#neutron-plugin-api-subordinate: *overlay-space
options:
worker-multiplier: *worker-multiplier
openstack-origin: *openstack-origin
region: *openstack-region
neutron-security-groups: True
#overlay-network-type: vxlan gre
overlay-network-type: 'vxlan'
use-internal-endpoints: True
vip: *neutron-api-vip
enable-l3ha: True
dhcp-agents-per-network: 2
enable-ml2-port-security: True
default-tenant-network-type: vxlan
l2-population: True
#global-physnet-mtu: 9000
manage-neutron-plugin-legacy-mode: True
to:
- lxd:100
- lxd:101
- lxd:102
controller-server:
charm: cs:ubuntu
num_units: 6
bindings:
"": *oam-space
to:
- 100
- 101
- 102
- 103
- 104
- 105
lma-server:
charm: cs:ubuntu
num_units: 3
bindings:
"": *oam-space
to:
- 300
- 301
- 302
neutron-gateway:
charm: cs:neutron-gateway
num_units: 3
bindings:
"": *oam-space
data: *overlay-space
options:
worker-multiplier: *worker-multiplier
openstack-origin: *openstack-origin
bridge-mappings: *bridge-mappings
data-port: *data-port
aa-profile-mode: enforce
dns-servers: *dns-servers
customize-failure-domain: *customize-failure-domain
to:
- 100
- 101
- 102
neutron-openvswitch:
charm: cs:neutron-openvswitch
num_units: 0
bindings:
"": *oam-space
data: *overlay-space
options:
worker-multiplier: *worker-multiplier
bridge-mappings: *bridge-mappings
prevent-arp-spoofing: True
firewall-driver: openvswitch
dns-servers: *dns-servers
data-port: *data-port
nova-cloud-controller:
charm: cs:nova-cloud-controller
num_units: 3
bindings:
"": *oam-space
public: *public-space
admin: *admin-space
internal: *internal-space
shared-db: *internal-space
memcache: *internal-space
options:
worker-multiplier: *worker-multiplier
openstack-origin: *openstack-origin
network-manager: Neutron
region: *openstack-region
vip: *nova-cc-vip
console-access-protocol: novnc
console-proxy-ip: local
use-internal-endpoints: True
ram-allocation-ratio: *ram-allocation-ratio
cpu-allocation-ratio: *cpu-allocation-ratio
config-flags: "scheduler_max_attempts=20"
to:
- lxd:103
- lxd:104
- lxd:105
placement:
charm: cs:placement
num_units: 3
bindings:
"": *oam-space
public: *public-space
admin: *admin-space
internal: *internal-space
shared-db: *internal-space
options:
worker-multiplier: *worker-multiplier
openstack-origin: *openstack-origin
vip: *placement-vip
to:
- lxd:103
- lxd:104
- lxd:105
nova-compute:
charm: cs:nova-compute
num_units: 8
bindings:
"": *oam-space
internal: *internal-space
options:
openstack-origin: *openstack-origin
enable-live-migration: True
enable-resize: True
migration-auth-type: ssh
use-internal-endpoints: True
libvirt-image-backend: rbd
restrict-ceph-pools: False
aa-profile-mode: enforce
virt-type: kvm
customize-failure-domain: *customize-failure-domain
reserved-host-memory: *reserved-host-memory
#cpu-mode: custom
#cpu-model: 'Skylake-Server-IBRS'
to:
- 1000
- 1001
- 1002
- 1003
- 1004
- 1005
- 1006
- 1007
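  # With enable-live-migration and migration-auth-type=ssh set above,
  # instances can be moved between hypervisors; a hypothetical example
  # (flag names vary with the client version):
  # openstack server migrate --live-migration <server-id>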
ntp:
charm: cs:ntp
options:
source: *ntp-source
pools: ''
openstack-dashboard:
charm: cs:openstack-dashboard
num_units: 3
constraints: *oam-space-constr
bindings:
"": *public-space
shared-db: *internal-space
options:
openstack-origin: *openstack-origin
webroot: "/"
secret: "encryptcookieswithme"
vip: *dashboard-vip
neutron-network-l3ha: True
neutron-network-lb: True
neutron-network-firewall: False
cinder-backup: False
password-retrieve: True
endpoint-type: 'publicURL'
to:
- lxd:100
- lxd:101
- lxd:102
rabbitmq-server:
charm: cs:rabbitmq-server
bindings:
"": *oam-space
amqp: *internal-space
cluster: *internal-space
options:
source: *openstack-origin
min-cluster-size: 3
cluster-partition-handling: pause_minority
num_units: 3
to:
- lxd:103
- lxd:104
- lxd:105
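  # min-cluster-size defers clustering until all three units are up;
  # once deployed, membership can be confirmed with, for example:
  # juju run --unit rabbitmq-server/0 -- rabbitmqctl cluster_status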
heat:
charm: cs:heat
num_units: 3
bindings:
"": *oam-space
public: *public-space
admin: *admin-space
internal: *internal-space
shared-db: *internal-space
#heat-plugin-subordinate: *overlay-space
options:
worker-multiplier: *worker-multiplier
openstack-origin: *openstack-origin
region: *openstack-region
vip: *heat-vip
use-internal-endpoints: True
config-flags: "max_nested_stack_depth=20"
to:
- lxd:100
- lxd:101
- lxd:102
memcached:
charm: cs:memcached
num_units: 3
constraints: *oam-space-constr
bindings:
"": *internal-space
cache: *internal-space
options:
allow-ufw-ip6-softfail: True
to:
- lxd:100
- lxd:101
- lxd:102
# canonical-livepatch:
# charm: cs:canonical-livepatch
# options:
# livepatch_key: include-file://../secrets/livepatch-key.txt
# livepatch_proxy: *snap-proxy
# #livepatch_proxy: 'http://10.2.65.7:8080'
# thruk-agent:
# charm: cs:thruk-agent
# series: bionic
# options:
# source: 'deb http://ppa.launchpad.net/canonical-bootstack/thruk/ubuntu bionic main'
# key: |
# -----BEGIN PGP PUBLIC KEY BLOCK-----
# Version: SKS 1.1.6
# Comment: Hostname: keyserver.ubuntu.com
# mQINBFQSRaQBEADDAtFnmi0w6ddIoR5olNu2778ACItGLtLPmlKTHJUjbs26nLZQcp5OY2DR
# cE03k55eXy7mn1aSxQaIqbC6lSPzpy+d1RTXMJmIJcEuyJKmJ2XfS9TgdhS3hrYmmNuFnBqp
# xc8FAqDnD/BnlF1suhgLf0mxiEZaTev5/ps3f/Ma8RK5ev5rM3ou/8iLewXlXBH83lf2OnzV
# BuYeAc/ikAnSg7dxyI26RMqdPi60NC67AVYqEddg8XoJ7zppUkvH4F+SlbgeadwEj6tjsOO3
# S/CII9AuSyUbkxm10HHKh3WiKgd8sUWmOvMwTow7NkThlydzDiyIS+WBCfoMWdogqTER+7wX
# tfpR4Bo84ZJAx9ksi8YBidx1gCn6jgebkB4xeel7BTwoIAZL+ShWyYRCSo++DQneE4LkhPr8
# 8V2+/VQbkXWIcyNagEA8mTJtkXgk3Pjalumt2TyR95/pxodN1+bVd2scoT4OMlAtKTZISwXs
# evYNo8Z6/ymFuSmtqYGGKA11vpao/OJfF4dvVkdArQ1gxgxhFnZyekZlwD81uC6hT/aTqiz6
# 9nSYvVZsnQJcPE2hjEZ+Fk3x/A3NGGQrorICRFuoLzjTFAoeOnhdTaMIQzwXg4bdYCNv4j8P
# PJkvF8EPi1lgcOZ1k1Ng4DRSu1EkHGs3i50h4nyVScKEaaDtRwARAQABtCVMYXVuY2hwYWQg
# UFBBIGZvciBDYW5vbmljYWwgQm9vdHN0YWNriQI4BBMBAgAiBQJUEkWkAhsDBgsJCAcDAgYV
# CAIJCgsEFgIDAQIeAQIXgAAKCRBLmoF0eiB1QuDOD/wLwZrtJOSm1W7Gkm5Qj5djkXi7b8mc
# M4vS2fbxdZJjE+KRqxOHGdK68CT8RyUCfl13+RLyA45UxsNSoGmdnTcc7LUJbihxy92WgzF5
# saJ1ObTMge/avS8kJ7l1B0xS3hue5GXfyVYcYlXV6gD53Kfu03z619PE2rmukm1YtyRWPQho
# okr4kNIJbAqG8LR0GnF0CKt9oq5bIs06LvBm2cbFa9txeDOZcLMKfgMOda3Ju7U6k56MYl4a
# sUUP8oXehcvbLx9nsOT4A4XHLj+yOTuXGsTXvn+M6NXODuHj3cN0OvVN+o6/6kjyVuWJqONr
# IdJ5knIWx6UKoWXzRdcqbsSyDpyuUjOFAPyQdQ3zs1DL9vJbOUasQOilR+YVX+ULN9Q17GkK
# IwZc68b9bDZQRtJi2bOhorWamHKZuEKw95lCEHOms/C4Lw04y7sPnXV0MZejXfn/X4N5BELb
# ItSPhoe2IBrh9p1W7CMvfkvjO62nM6oqh6vdKmgW4Im/PG+7DYpLAIHY+C0WsbI2BKDTHNYu
# VKBmUjgMwfz+peks7pJBUgT74XR954vnOvMn0IiSV/+aoHANzeA9dxkt5W5YW1gBK9sw3eTQ
# 9jcRJzswkuHqgE+HFqGFzIgBYB+769+vUdbVEIDKPQXJB94VoLv2oFe1eOQhIbuBTZtDe2x6
# DCE3Nw==
# =Kaig
# -----END PGP PUBLIC KEY BLOCK-----
# external-policy-routing:
# charm: cs:~canonical-bootstack/policy-routing
# options:
# cidr: *external-network-cidr
# gateway: *external-network-gateway
ceilometer:
charm: cs:ceilometer
num_units: 3
bindings:
"": *oam-space
public: *public-space
admin: *admin-space
internal: *internal-space
options:
openstack-origin: *openstack-origin
region: *openstack-region
use-internal-endpoints: True
to:
- lxd:200
- lxd:201
- lxd:202
ceilometer-agent:
charm: cs:ceilometer-agent
options:
use-internal-endpoints: True
  # Just to deploy the ubuntu charm for the Contrail servers
juniper-server:
charm: cs:ubuntu
num_units: 3
to:
- 400
- 401
- 402
# vault stuff
etcd:
charm: cs:etcd
num_units: 3
bindings:
"": *oam-space
cluster: *internal-space
db: *internal-space
options:
channel: 3.2/stable
to:
- lxd:400
- lxd:401
- lxd:402
easyrsa:
charm: cs:~containers/easyrsa
num_units: 1
bindings:
"": *oam-space
to:
- lxd:402
vault:
charm: cs:vault
num_units: 3
bindings:
"": *oam-space
access: *internal-space
secrets: *internal-space
certificates: *internal-space
ha: *internal-space
etcd: *internal-space
cluster: *internal-space
options:
vip: *vault-vip
nagios_context: *nagios-context
to:
- lxd:400
- lxd:401
- lxd:402
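  # Vault comes up sealed; it must be initialised and unsealed on every
  # unit, and the charm authorised, before ceph-osd can store its
  # encryption keys. A sketch of the usual post-deploy steps (token
  # handling simplified; see the vault charm documentation):
  # juju ssh vault/0
  #   export VAULT_ADDR=http://127.0.0.1:8200
  #   vault operator init -key-shares=5 -key-threshold=3
  #   vault operator unseal   # repeat with 3 of the 5 keys, on each unit
  # juju run-action --wait vault/leader authorize-charm token=$ONE_TIME_TOKEN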
relations:
# openstack
- [ "keystone:ha", "hacluster-keystone:ha" ]
- [ "keystone:shared-db", "keystone-mysql-router:shared-db" ]
- [ "cinder:shared-db", "cinder-mysql-router:shared-db" ]
- [ "cinder:identity-service", "keystone:identity-service" ]
- [ "cinder:amqp", "rabbitmq-server:amqp" ]
- [ "cinder:ha", "hacluster-cinder:ha" ]
- [ "cinder-ceph:ceph", "ceph-mon:client" ]
- [ "cinder-ceph:storage-backend", "cinder:storage-backend" ]
# - [ "cinder2:shared-db", "cinder2-mysql-router:shared-db" ]
# - [ "cinder2:identity-service", "keystone:identity-service" ]
# - [ "cinder2:amqp", "rabbitmq-server:amqp" ]
# - [ "cinder2:ha", "hacluster-cinder2:ha" ]
# - [ "cinder-ceph:storage-backend", "cinder2:storage-backend" ]
# - [ "cinder2", "cinder-backup" ]
- [ "ceph-osd:mon", "ceph-mon:osd" ]
# - [ "ceph-osd2:mon", "ceph-mon:osd" ]
- [ "glance:ha", "hacluster-glance:ha" ]
- [ "glance:shared-db", "glance-mysql-router:shared-db" ]
- [ "glance:identity-service", "keystone:identity-service" ]
- [ "glance:ceph", "ceph-mon:client" ]
- [ "glance:amqp", "rabbitmq-server:amqp" ]
- [ "glance:image-service", "cinder:image-service" ]
- [ "heat:ha", "hacluster-heat:ha" ]
- [ "heat:shared-db", "heat-mysql-router:shared-db" ]
- [ "heat:identity-service", "keystone:identity-service" ]
- [ "heat:amqp", "rabbitmq-server:amqp" ]
- [ "neutron-api:ha", "hacluster-neutron:ha" ]
- [ "neutron-api:shared-db", "neutron-api-mysql-router:shared-db" ]
- [ "neutron-api:amqp", "rabbitmq-server:amqp" ]
- [ "neutron-api:neutron-api", "nova-cloud-controller:neutron-api" ]
- [ "neutron-api:identity-service", "keystone:identity-service" ]
- [ "nova-cloud-controller:ha", "hacluster-nova:ha" ]
- [ "nova-cloud-controller:shared-db", "nova-cloud-controller-mysql-router:shared-db" ]
- [ "nova-cloud-controller:amqp", "rabbitmq-server:amqp" ]
- [ "nova-cloud-controller:identity-service", "keystone:identity-service" ]
- [ "nova-cloud-controller:image-service", "glance:image-service" ]
- [ "nova-cloud-controller:memcache", "memcached:cache" ]
- [ "nova-compute:juju-info", "ntp:juju-info" ]
- [ "nova-compute:amqp", "rabbitmq-server:amqp" ]
- [ "nova-compute:ceph", "ceph-mon:client" ]
- [ "nova-compute:ceph-access", "cinder-ceph:ceph-access" ]
- [ "nova-compute:image-service", "glance:image-service" ]
- [ "nova-compute:cloud-compute", "nova-cloud-controller:cloud-compute" ]
- [ "openstack-dashboard:ha", "hacluster-horizon:ha" ]
- [ "openstack-dashboard:identity-service", "keystone:identity-service" ]
- [ "openstack-dashboard:shared-db", "openstack-dashboard-mysql-router:shared-db" ]
# ceilometer
- [ "ceilometer:identity-credentials", "keystone:identity-credentials" ]
- [ "ceilometer:amqp", "rabbitmq-server:amqp" ]
- [ "ceilometer-agent:ceilometer-service", "ceilometer:ceilometer-service" ]
- [ "ceilometer-agent:nova-ceilometer", "nova-compute:nova-ceilometer" ]
- [ "ceilometer-agent:amqp", "rabbitmq-server:amqp"]
# gnocchi
- [ "gnocchi:ha", "hacluster-gnocchi:ha" ]
- [ "gnocchi:shared-db", "gnocchi-mysql-router:shared-db" ]
- [ "gnocchi:amqp", "rabbitmq-server:amqp" ]
- [ "gnocchi:identity-service", "keystone:identity-service" ]
- [ "gnocchi:storage-ceph", "ceph-mon:client" ]
- [ "gnocchi:coordinator-memcached", "memcached:cache" ]
- [ "gnocchi:metric-service", "ceilometer:metric-service" ]
# aodh
- [ "aodh:shared-db", "aodh-mysql-router:shared-db" ]
- [ "aodh:identity-service", "keystone:identity-service" ]
- [ "aodh:amqp", "rabbitmq-server:amqp" ]
- [ "aodh:ha", "hacluster-aodh:ha" ]
# placement
- [ "placement:ha", "hacluster-placement:ha" ]
- [ "placement:shared-db", "placement-mysql-router:shared-db" ]
- [ "placement:identity-service", "keystone:identity-service" ]
- [ "placement:placement", "nova-cloud-controller:placement" ]
# mysql-router
- [ "aodh-mysql-router:db-router", "mysql-innodb-cluster:db-router" ]
- [ "keystone-mysql-router:db-router", "mysql-innodb-cluster:db-router" ]
- [ "cinder-mysql-router:db-router", "mysql-innodb-cluster:db-router" ]
# - [ "cinder2-mysql-router:db-router", "mysql-innodb-cluster:db-router" ]
- [ "glance-mysql-router:db-router", "mysql-innodb-cluster:db-router" ]
- [ "gnocchi-mysql-router:db-router", "mysql-innodb-cluster:db-router" ]
- [ "heat-mysql-router:db-router", "mysql-innodb-cluster:db-router" ]
- [ "nova-cloud-controller-mysql-router:db-router", "mysql-innodb-cluster:db-router" ]
- [ "neutron-api-mysql-router:db-router", "mysql-innodb-cluster:db-router" ]
- [ "openstack-dashboard-mysql-router:db-router", "mysql-innodb-cluster:db-router" ]
- [ "placement-mysql-router:db-router", "mysql-innodb-cluster:db-router" ]
- [ "vault-mysql-router:db-router", "mysql-innodb-cluster:db-router" ]
# sysconfig relations
#- [ "ceph-osd:juju-info", "sysconfig-storage:juju-info" ]
- [ "nova-compute:juju-info", "sysconfig-compute:juju-info" ]
- [ "controller-server:juju-info", "sysconfig-control:juju-info" ]
# Neutron-gateway relations
- [ "neutron-gateway:quantum-network-service", "nova-cloud-controller:quantum-network-service" ]
- [ "neutron-gateway:amqp", "rabbitmq-server:amqp" ]
- [ "neutron-gateway:neutron-plugin-api", "neutron-api:neutron-plugin-api" ]
- [ "neutron-gateway:juju-info", "ntp:juju-info" ]
# Neutron-openvswitch relations
- [ "neutron-openvswitch:amqp" , "rabbitmq-server:amqp" ]
- [ "neutron-openvswitch:neutron-plugin-api" , "neutron-api:neutron-plugin-api" ]
- [ "neutron-openvswitch:neutron-plugin" , "nova-compute:neutron-plugin" ]
# vault stuff
- [ "vault:shared-db", "vault-mysql-router:shared-db" ]
- [ "vault:ha", "hacluster-vault:ha" ]
- [ "ceph-osd:secrets-storage", "vault:secrets"]
# - [ "ceph-osd2:secrets-storage", "vault:secrets"]
- [ "etcd:certificates", "easyrsa:client" ]
- [ "etcd:db", "vault:etcd" ]
# juniper server
- [ "juniper-server:juju-info", "ntp:juju-info" ]
# lma server
- [ "lma-server:juju-info", "ntp:juju-info" ]