Initial commit

Arif Ali 2021-10-29 09:57:19 +01:00
commit d9f23802f7
Signed by: arif
GPG Key ID: 369608FBA1353A70
54 changed files with 5388 additions and 0 deletions

.gitignore vendored Normal file
@@ -0,0 +1,4 @@
generated/*
secrets/*
.vscode
*~

README.md Normal file (empty)

config/bucketsconfig.yaml Normal file
@@ -0,0 +1,469 @@
---
allocations:
control:
machines:
- as1-maas-node-01
- as2-maas-node-01
- as3-maas-node-01
lma:
machines:
- as1-maas-node-02
- as2-maas-node-02
- as3-maas-node-02
landscape:
machines:
- as1-maas-node-03
- as2-maas-node-03
- as3-maas-node-03
compute:
machines:
- as1-maas-node-04
- as1-maas-node-05
- as2-maas-node-04
- as2-maas-node-05
- as3-maas-node-04
- as3-maas-node-05
configs:
control:
disks: &common-disks
# RAID 1 OS disk
- disk: 0
id: os0
ptable: GPT
type: disk
# EFI partition
- device: os0
id: os0-efi
number: 1
size: 512M
type: partition
# EFI format
- volume: os0-efi
id: os0-efi-format
label: EFI
fstype: fat32
type: format
# EFI mount
- device: os0-efi-format
id: os0-efi-mount
path: /boot/efi
type: mount
# Boot partition
- device: os0
id: os0-boot
number: 2
size: 2G
type: partition
# Boot format
- volume: os0-boot
id: os0-boot-format
label: BOOT
fstype: ext4
type: format
# Boot mount
- device: os0-boot-format
id: os0-boot-mount
path: /boot
type: mount
# bcache partition
- device: os0
id: os0-bcache
number: 3
size: 236G
type: partition
# RAID 10 Disk
- disk: 1
id: os1
ptable: GPT
type: disk
# root partition
- device: os1
id: os1-root
number: 1
size: 11.9T
type: partition
# Bcache for root
- backing_device: os1-root
cache_device: os0-bcache
cache_mode: writeback
id: root0
name: root0
type: bcache
# LVM volume group for root
- devices:
- root0
id: vg0
name: vg0
type: lvm_volgroup
# LVM partition for root
- id: vg0-root
name: root
type: lvm_partition
size: 20G
volgroup: vg0
# root format
- volume: vg0-root
id: vg0-root-format
label: root
fstype: ext4
type: format
# root mount
- device: vg0-root-format
id: vg0-root-mount
path: /
type: mount
# LVM partition for home
- id: vg0-home
name: home
type: lvm_partition
size: 1G
volgroup: vg0
# home format
- volume: vg0-home
id: vg0-home-format
label: home
fstype: ext4
type: format
# home mount
- device: vg0-home-format
id: vg0-home-mount
path: /home
type: mount
# LVM partition for tmp
- id: vg0-tmp
name: tmp
type: lvm_partition
size: 2G
volgroup: vg0
# tmp format
- volume: vg0-tmp
id: vg0-tmp-format
label: tmp
fstype: ext4
type: format
# tmp mount
- device: vg0-tmp-format
id: vg0-tmp-mount
path: /tmp
type: mount
# LVM partition for var
- id: vg0-var
name: var
type: lvm_partition
size: 5T
volgroup: vg0
# var format
- volume: vg0-var
id: vg0-var-format
label: var
fstype: ext4
type: format
# var mount
- device: vg0-var-format
id: vg0-var-mount
path: /var
type: mount
# LVM partition for var/log
- id: vg0-var-log
name: var-log
type: lvm_partition
size: 20G
volgroup: vg0
# var/log format
- volume: vg0-var-log
id: vg0-var-log-format
label: var-log
fstype: ext4
type: format
# var/log mount
- device: vg0-var-log-format
id: vg0-var-log-mount
path: /var/log
type: mount
# LVM partition for var/tmp
- id: vg0-var-tmp
name: var-tmp
type: lvm_partition
size: 2G
volgroup: vg0
# var/tmp format
- volume: vg0-var-tmp
id: vg0-var-tmp-format
label: var-tmp
fstype: ext4
type: format
# var/tmp mount
- device: vg0-var-tmp-format
id: vg0-var-tmp-mount
path: /var/tmp
type: mount
# LVM partition for var/log/audit
- id: vg0-var-log-audit
name: var-log-audit
type: lvm_partition
size: 2G
volgroup: vg0
# var/log/audit format
- volume: vg0-var-log-audit
id: vg0-var-log-audit-format
label: var-log-audit
fstype: ext4
type: format
# var/log/audit mount
- device: vg0-var-log-audit-format
id: vg0-var-log-audit-mount
path: /var/log/audit
type: mount
# LVM partition for SWAP
- id: vg0-swap
name: swap
type: lvm_partition
size: 8G
volgroup: vg0
# SWAP format
- volume: vg0-swap
id: vg0-swap-format
label: swap
fstype: swap
type: format
# SWAP mount
- device: vg0-swap-format
id: vg0-swap-mount
path: ''
type: mount
nics: &common-nics
# ens3 NIC
- id: ens3
name: ens3
nic: 0
type: physical
vlan_id: 300
subnets:
oam:
mode: static
ip_addresses:
as1-maas-node-01: 10.0.1.101
as2-maas-node-01: 10.0.1.102
as3-maas-node-01: 10.0.1.103
as1-maas-node-02: 10.0.1.111
as2-maas-node-02: 10.0.1.112
as3-maas-node-02: 10.0.1.113
as1-maas-node-03: 10.0.1.121
as2-maas-node-03: 10.0.1.122
as3-maas-node-03: 10.0.1.123
as1-maas-node-04: 10.0.1.131
as1-maas-node-05: 10.0.1.132
as2-maas-node-04: 10.0.1.133
as2-maas-node-05: 10.0.1.134
as3-maas-node-04: 10.0.1.135
as3-maas-node-05: 10.0.1.136
fabric: default
# ens4 NIC
- id: ens4
name: ens4
nic: 1
type: physical
vlan_id: 301
subnets: [ceph-access]
fabric: default
# ens5 NIC
- id: ens5
name: ens5
nic: 2
type: physical
vlan_id: 302
subnets: [ceph-replica]
fabric: default
# ens6 NIC
- id: ens6
name: ens6
nic: 3
type: physical
vlan_id: 303
subnets: [overlay]
fabric: default
# ens7 NIC
- id: ens7
name: ens7
nic: 4
type: physical
vlan_id: 304
subnets: [admin]
fabric: default
# ens8 NIC
- id: ens8
name: ens8
nic: 5
type: physical
vlan_id: 305
subnets: [internal]
fabric: default
# ens9 NIC
- id: ens9
name: ens9
nic: 6
type: physical
vlan_id: 1
subnets: [external]
fabric: default
lma:
disks: *common-disks
nics: *common-nics
landscape:
disks: *common-disks
nics: *common-nics
compute:
disks: &compute-disks
# RAID 1 OS disk
- disk: 0
id: os0
ptable: GPT
type: disk
# EFI partition
- device: os0
id: os0-efi
number: 1
size: 512M
type: partition
# EFI format
- volume: os0-efi
id: os0-efi-format
label: EFI
fstype: fat32
type: format
# EFI mount
- device: os0-efi-format
id: os0-efi-mount
path: /boot/efi
type: mount
# Boot partition
- device: os0
id: os0-boot
number: 2
size: 2G
type: partition
# Boot format
- volume: os0-boot
id: os0-boot-format
label: BOOT
fstype: ext4
type: format
# Boot mount
- device: os0-boot-format
id: os0-boot-mount
path: /boot
type: mount
# New section
# Root partition
- device: os0
id: os0-root
number: 3
size: 53G
type: partition
# Copied from the control nodes: LVM volume group for root
- devices:
- os0-root
id: vg0
name: vg0
type: lvm_volgroup
# LVM partition for root
- id: vg0-root
name: root
type: lvm_partition
size: 45G
volgroup: vg0
# root format
- volume: vg0-root
id: vg0-root-format
label: root
fstype: ext4
type: format
# root mount
- device: vg0-root-format
id: vg0-root-mount
path: /
type: mount
# LVM partition for SWAP
- id: vg0-swap
name: swap
type: lvm_partition
size: 8G
volgroup: vg0
# SWAP format
- volume: vg0-swap
id: vg0-swap-format
label: swap
fstype: swap
type: format
# SWAP mount
- device: vg0-swap-format
id: vg0-swap-mount
path: ''
type: mount
nics: *common-nics
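Note: one way to sanity-check this curtin-style storage layout after a node deploys is to inspect the block devices directly on the machine. A minimal sketch (the /dev/sda3 device name is an assumption; actual names depend on the hardware):
#!/bin/bash
# Verify the bcache/LVM layout from bucketsconfig.yaml on a deployed node
lsblk -o NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT   # partition/bcache/LVM tree
sudo bcache-super-show /dev/sda3            # assumed bcache cache partition (os0-bcache)
sudo vgs vg0 && sudo lvs vg0                # the vg0 volume group and its logical volumes
swapon --show                               # the 8G swap LV should be active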

config/bundle.yaml Normal file
File diff suppressed because it is too large.

@@ -0,0 +1,113 @@
subordinates:
telegraf:
where: all except prometheus # and prometheus-ceph-exporter and prometheus-openstack-exporter
host-suffixes: [prometheus]
landscape-client:
where: all except landscape-server
filebeat:
where: all except graylog
canonical-livepatch:
where: host only
nrpe:
where: container aware # and except nagios
host-suffixes: [host, physical, guest]
container-suffixes: [lxd, container]
ntp:
where: host only # You don't want NTP in a container duelling with ntp in the host
thruk-agent:
where: on nagios
operations mandatory: &operations-mandatory-charms
- elasticsearch
- grafana
- graylog
- landscape-server
- nagios
- openstack-service-checks
- prometheus2
- prometheus-openstack-exporter
- prometheus-ceph-exporter
operations mandatory dependencies: &operations-mandatory-deps
- postgresql
operations subordinates: &operations-mandatory-subs
- canonical-livepatch
- filebeat
- ksplice
- landscape-client
- lldpd
- nrpe
- ntp
- telegraf
- thruk-agent
operations charms: &operations-charms
- *operations-mandatory-charms
- *operations-mandatory-deps
- *operations-mandatory-subs
openstack mandatory: &openstack-mandatory-charms
- ceilometer
- ceilometer-agent
- ceph-mon
- ceph-osd
- cinder
- cinder-ceph
- glance
- heat
- keystone
- neutron-api
- nova-cloud-controller
- nova-compute
- openstack-dashboard
openstack mandatory deps: &openstack-mandatory-deps
- haproxy
- memcached
- percona-cluster
- rabbitmq-server
openstack mandatory subordinates: &openstack-mandatory-subs
- hacluster
openstack optional charms: &openstack-optional-charms
- aodh
- ceph-radosgw
- designate
- designate-bind
- glance-simplestreams-sync
- glance-sync-slave
- gnocchi
- keystone-ldap
- mongodb # Optional since Gnocchi
- neutron-gateway
- swift-proxy
- swift-storage
- cinder-backup
- vault
- etcd
- easyrsa
- neutron-openvswitch
cisco-aci-charms: &cisco-aci-charms
- neutron-api-plugin-aci
- openstack-dashboard-plugin-gbp
helper-charms: &helper-charms
- sysconfig
- bcache-tuning
- policy-routing
openstack charms: &openstack-charms
- *openstack-mandatory-charms
- *openstack-mandatory-deps
- *openstack-mandatory-subs
- *openstack-optional-charms
- *cisco-aci-charms
known charms:
- ubuntu
- *openstack-charms
- *operations-charms
- *helper-charms
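Note: this file leans heavily on YAML anchors (&name) and aliases (*name); aggregate lists such as "known charms" are nested lists of aliases. A quick way to see the resolved structure (the config/known-charms.yaml filename is an assumption, since the name is not shown in this diff):
python3 -c 'import yaml, sys; print(yaml.safe_load(open(sys.argv[1]))["known charms"])' config/known-charms.yaml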

config/dnsresources.yaml Normal file
@@ -0,0 +1,54 @@
---
# Internal VIPs [OAM network - vlan300]
- fqdn: aodh-internal.example.com
ip_addresses: 10.0.1.211
- fqdn: cinder-internal.example.com
ip_addresses: 10.0.1.212
- fqdn: dashboard-internal.example.com
ip_addresses: 10.0.1.213
- fqdn: glance-internal.example.com
ip_addresses: 10.0.1.214
- fqdn: heat-internal.example.com
ip_addresses: 10.0.1.215
- fqdn: keystone-internal.example.com
ip_addresses: 10.0.1.216
- fqdn: mysql-internal.example.com
ip_addresses: 10.0.1.217
- fqdn: neutron-internal.example.com
ip_addresses: 10.0.1.218
- fqdn: nova-internal.example.com
ip_addresses: 10.0.1.219
- fqdn: gnocchi-internal.example.com
ip_addresses: 10.0.1.220
- fqdn: vault-internal.example.com
ip_addresses: 10.0.1.221
# These records need to be created manually after the corresponding VMs are created
#- fqdn: nagios-internal.example.com
# ip_addresses: 10.0.1.63
#- fqdn: landscape-internal.example.com
# ip_addresses: 10.0.1.63
#- fqdn: graylog-internal.example.com
# ip_addresses: 10.0.1.63
# External VIPs [external network - vlan1]
- fqdn: aodh.example.com
ip_addresses: 10.0.1.211
- fqdn: cinder.example.com
ip_addresses: 10.0.1.212
- fqdn: dashboard.example.com
ip_addresses: 10.0.1.213
- fqdn: glance.example.com
ip_addresses: 10.0.1.214
- fqdn: heat.example.com
ip_addresses: 10.0.1.215
- fqdn: keystone.example.com
ip_addresses: 10.0.1.216
- fqdn: mysql.example.com
ip_addresses: 10.0.1.217
- fqdn: neutron.example.com
ip_addresses: 10.0.1.218
- fqdn: nova.example.com
ip_addresses: 10.0.1.219
- fqdn: gnocchi.example.com
ip_addresses: 10.0.1.220
- fqdn: vault.example.com
ip_addresses: 10.0.1.221
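These entries are normally created by the tooling, but the equivalent manual MAAS CLI call for one record would look roughly like this (assuming an admin CLI profile named "admin"):
maas admin dnsresources create fqdn=aodh-internal.example.com ip_addresses=10.0.1.211
maas admin dnsresources read   # list what MAAS currently serves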

config/hosts.yaml Normal file
@@ -0,0 +1,39 @@
- hostname: asrock01
ip: 10.0.1.241
vm_zone: asrock01
pods:
- name: as1-juju-01
disk: 20G
cpu: 2
mem: 4G
nics:
- space: oam
ip: 10.0.1.231
mode: auto
tags: ['juju']
- hostname: asrock02
ip: 10.0.1.242
vm_zone: asrock02
pods:
- name: as2-juju-01
disk: 20G
cpu: 2
mem: 4G
nics:
- space: oam
ip: 10.0.1.232
mode: auto
tags: ['juju']
- hostname: asrock03
ip: 10.0.1.243
vm_zone: asrock03
pods:
- name: as3-juju-01
disk: 20G
cpu: 2
mem: 4G
nics:
- space: oam
ip: 10.0.1.233
mode: auto
tags: ['juju']
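The pods stanzas describe KVM guests for MAAS to compose via libvirt. A hedged manual equivalent for registering one asrock host as a virsh pod (the qemu+ssh URI and the ubuntu user are assumptions based on ssh_user in master.yaml):
maas admin pods create type=virsh power_address=qemu+ssh://ubuntu@10.0.1.241/system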

@@ -0,0 +1,72 @@
cloudinit-userdata: |
packages:
- squashfuse
- libopenscap8
write_files:
- owner: root:root
path: /etc/apt/auth.conf.d/cis-harden.conf
permissions: '0600'
content: |
# Credentials to allow the connection to the CIS benchmarks private PPA
machine private-ppa.launchpad.net/ubuntu-advantage/security-benchmarks/ubuntu
login arif-ali
password kNnpLf27XvGsdwt6VxfT
- owner: root:root
path: /tmp/cis-hardening.conf
permissions: '0644'
content: |
# Hash created by grub-mkpasswd-pbkdf2 to set grub password. If empty, grub password
# is not set.
# (CIS rule 1.4.2)
grub_hash=
# Grub user set for authentication
grub_user=root
# Time synchronization service selected (ntp or chrony - if empty, none will be installed)
# (CIS rule 2.2.1.1-2.2.1.3)
time_sync_svc=
time_sync_addr=
# Audit log storage size, before log is automatically rotated
# (CIS rule 4.1.1.1)
max_log_file=8
# Remote log host address (CIS rule 4.2.2.4)
# Use the format loghost.example.com:554, to define the port
remote_log_server=
# SSH access limitation parameters at /etc/ssh/sshd_config (CIS rule 5.2.14)
AllowUsers=ubuntu
AllowGroups=
DenyUsers=
DenyGroups=
# PAM password quality parameters at /etc/security/pwquality.conf (CIS rule 5.3.1)
minlen=14
dcredit=-1
ucredit=-1
ocredit=-1
lcredit=-1
# sudo group members, aside from root (CIS rule 5.6)
sudo_member=
# Unowned files will be changed to this user (CIS rule 6.1.11)
unowned_user=root
# Ungrouped files will be changed to this user (CIS rule 6.1.12)
unowned_group=root
# Delete files in the home directory which violate CIS rules (CIS rules 6.2.11, 6.2.12, 6.2.14)
delete_user_files=true
preruncmd:
- locale-gen en_GB.UTF-8; update-locale
- sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys A166877412DAC26E73CEBF3FF6C280178D13028C
- sudo add-apt-repository "deb https://private-ppa.launchpad.net/ubuntu-advantage/security-benchmarks/ubuntu bionic main"
- sudo apt update
- sudo DEBIAN_FRONTEND=noninteractive apt install -y -q usg-cisbenchmark
- cd /usr/share/ubuntu-scap-security-guides/cis-hardening; sudo ./Canonical_Ubuntu_18.04_CIS-harden.sh -f /tmp/cis-hardening.conf lvl2_server
# remove auditd as added by Hardening script but is not supported on containers
- "systemd-detect-virt --container && apt purge -y auditd"
default-series: "bionic"
apt-mirror: http://192.168.1.12/ubuntu
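Since all of the hardening above runs through cloud-init, the simplest way to confirm it actually executed on a freshly deployed unit is cloud-init's own status and logs:
#!/bin/bash
cloud-init status --long                                 # should report status: done
sudo grep -i cis /var/log/cloud-init-output.log | tail   # output from the hardening script
dpkg -l auditd 2>/dev/null                               # should be absent inside containers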

@@ -0,0 +1,32 @@
cloudinit-userdata: |
write_files:
- owner: root:root
path: /root/99-post-juju.yaml
permissions: '0644'
content: |
network:
version: 2
ethernets:
ens3:
link-local: []
ens4:
link-local: []
ens5:
link-local: []
ens6:
link-local: []
ens7:
link-local: []
ens8:
link-local: []
ens9:
link-local: []
preruncmd:
- locale-gen en_GB.UTF-8; update-locale
- "systemd-detect-virt --container && rm -rf /root/99-post-juju.yaml"
- "! systemd-detect-virt --container && mv /root/99-post-juju.yaml /etc/netplan/99-post-juju.yaml"
- "! systemd-detect-virt --container && sudo lxc profile set default security.nesting true"
- sudo netplan apply
default-series: "bionic"
apt-mirror: http://192.168.1.12/ubuntu
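Note: link-local: [] disables link-local addressing on each NIC. A quick post-deploy check that the netplan override landed (on hosts) or was removed (in containers):
ls /etc/netplan/          # expect 99-post-juju.yaml on hosts, not in containers
ip -br addr show          # no 169.254.x.x addresses should remain on ens3..ens9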

config/juju_deploy.sh Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
juju deploy ./bundle.yaml \
--overlay ./overlays/ovs.yaml \
--overlay ./overlays/hostnames.yaml \
--overlay ./overlays/ldap.yaml \
--overlay ./overlays/resources.yaml \
--overlay ./overlays/openstack_versioned_overlay.yaml \
--overlay ./overlays/stsstack.yaml "$@"
# --overlay ./overlays/contrail.yaml \
# --overlay ./overlays/openstack_versioned_overlay.yaml \
# --overlay ./overlays/openstack_versioned_overlay_gemini.yaml \
# --overlay ./overlays/contrail_versioned_overlay.yaml \
# --overlay ./overlays/ssl.yaml \
# --overlay ./overlays/contrail.yaml \
# --overlay ./overlays/contrail_versioned_overlay.yaml \
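With "$@" passing extra arguments straight through to juju deploy, typical invocations would be:
./juju_deploy.sh --dry-run                 # render the merged bundle without deploying
./juju_deploy.sh --map-machines=existing   # redeploy onto machines already in the model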

config/master.yaml Normal file
@@ -0,0 +1,94 @@
project:
customer: Arif Ali
project: Internal System
opportunity: blah
sku: fcb-stable-queens-bionic-bluestore
arch: disaggregated
hw: approved
layers:
- name: baremetal
type: baremetal
config:
hosts: include-rel://config/hosts.yaml
ssh_user: ubuntu
- name: maas
type: maas
parent: baremetal
config:
tweaks:
- nobond
- nobridge
maas_vip: 192.168.1.22
postgresql_vip: 192.168.1.22
package_repositories: []
maas_config:
maas_name: maas
completed_intro: True
dnssec_validation: 'no'
upstream_dns:
- 192.168.1.13
ntp_servers:
- 0.uk.pool.ntp.org
kernel_opts:
- console=tty0
- console=ttyS0,115200n8
maas_boot_source_selections:
- release: 'focal'
arches: ['amd64']
- release: 'bionic'
arches: ['amd64']
maas_admin: admin
maas_admin_email: mail@arif-ali.co.uk
# The password will be auto-generated in `generated/maas/maas-pass`
maas_admin_password: openstack
infra_extra_repositories:
- ppa:maas/stable
networks: include-rel://config/networks.yaml
enlist_timeout: 1801
nodeconfig: include-rel://config/bucketsconfig.yaml
nodes: include-rel://config/nodes.yaml
domains:
- name: maas
#authoritative: false
is_default: true
dnsresources: include-rel://config/dnsresources.yaml
- name: juju_maas_controller
type: juju_maas_controller
parent: maas
config:
ha: 3
ha_timeout: 900
controller_name: home-maas
model_defaults: juju-model-default.yaml
- name: openstack
type: openstack
parent: juju_maas_controller
config:
juju_model: openstack
bundles: # Primary bundle followed by optional overlays.
- bundle.yaml
- overlays/hostnames.yaml
- overlays/ovs.yaml
- overlays/ldap.yaml
- overlays/openstack_versioned_overlay.yaml
- overlays/stsstack.yaml
ha_type: ssl
openstack_config: include-rel://config/openstack.yaml
validate:
- type: rados
config:
unit: ceph-mon/0
- type: fio
config:
unit: ceph-mon/0
- type: rally
config:
task_yaml: rally.yaml
#- name: magpie
# type: magpie
# parent: juju_maas_controller
# config:
# juju_model: magpie
# oam-space: oam-space
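The validate list runs rados/fio/rally checks via the tooling; a hedged manual equivalent for the rally step against the finished cloud:
#!/bin/bash
rally deployment create --fromenv --name openstack   # picks up the usual OS_* variables
rally task start rally.yaml                          # the task file referenced above
rally task report --out rally-report.html            # human-readable results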

config/networks.yaml Normal file
@@ -0,0 +1,131 @@
fabrics:
default:
vlans:
300:
# OAM network
dhcp_on: true
mtu: 1500
space: oam
subnets:
oam:
cidr: 10.0.1.0/24
gateway_ip: 10.0.1.253
reserved:
# 10.2.70.1, .2,.3, reserved
# gap: 4-7, infra-head, reserved
# 8-10 reserved diff env
# 11-13 nodes static ips
# 14-16 infras
# 17-25: nodes storage static ips
# 26-126: future use, reserved
# 127-255: unused, future use, reserved
# 71.0: openstack
# .1 .2 maas
# .3-18: vips
# 19-59: dhcp
# 60-62: juju controllers
# 63-254: containers
dhcp:
start_ip: 10.0.1.1
end_ip: 10.0.1.9
type: dynamic
openstack-vips:
start_ip: 10.0.1.211
end_ip: 10.0.1.225
servers:
start_ip: 10.0.1.241
end_ip: 10.0.1.254
1:
# External is on the same bond as OAM -> MTU 1500
mtu: 1500
space: external
subnets:
external:
cidr: 192.168.1.0/24
301:
dhcp_on: true
mtu: 1500
space: ceph-access
subnets:
ceph_access:
cidr: 10.0.2.0/24
reserved:
maas:
start_ip: 10.0.2.1
end_ip: 10.0.2.49
type: dynamic
servers:
start_ip: 10.0.2.241
end_ip: 10.0.2.254
302:
dhcp_on: true
mtu: 1500
space: ceph-replica
subnets:
ceph_replication:
cidr: 10.0.3.0/24
reserved:
maas:
start_ip: 10.0.3.1
end_ip: 10.0.3.49
type: dynamic
servers:
start_ip: 10.0.3.241
end_ip: 10.0.3.254
303:
dhcp_on: true
mtu: 1500
space: overlay
subnets:
overlay:
cidr: 10.0.4.0/24
reserved:
maas:
start_ip: 10.0.4.1
end_ip: 10.0.4.49
type: dynamic
servers:
start_ip: 10.0.4.241
end_ip: 10.0.4.254
304:
dhcp_on: true
mtu: 1500
space: internal
subnets:
internal:
cidr: 10.0.5.0/24
reserved:
maas:
start_ip: 10.0.5.1
end_ip: 10.0.5.49
type: dynamic
servers:
start_ip: 10.0.5.241
end_ip: 10.0.5.254
305:
dhcp_on: true
mtu: 1500
space: admin
subnets:
admin:
cidr: 10.0.6.0/24
reserved:
maas:
start_ip: 10.0.6.1
end_ip: 10.0.6.49
type: dynamic
servers:
start_ip: 10.0.6.241
end_ip: 10.0.6.254
spaces:
ceph-access:
description: Ceph access.
ceph-replica:
description: Ceph replication.
external:
description: Floating IP network.
oam:
description: Operations, administration and management. PXE and stuff.
overlay:
description: OVS overlay space for tenant networks
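Each reserved block above becomes a MAAS IP range. The tooling creates them from this file, but the manual equivalent for one subnet (ceph-access, 10.0.2.0/24) is roughly:
maas admin ipranges create type=dynamic start_ip=10.0.2.1 end_ip=10.0.2.49
maas admin ipranges create type=reserved start_ip=10.0.2.241 end_ip=10.0.2.254 comment=servers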

config/nodes.yaml Normal file
@@ -0,0 +1,24 @@
#asrock01:
# bmc_user: include-rel://secrets/cimc-user
# bmc_password: include-rel://secrets/cimc-password
# power_type: manual
# bmc_address: 10.2.69.17
# bmc_power_boot_type: efi
# zone: asrock01
#asrock02:
# bmc_user: include-rel://secrets/cimc-user
# bmc_password: include-rel://secrets/cimc-password
# power_type: manual
# bmc_address: 10.2.69.18
# bmc_power_boot_type: efi
# zone: asrock02
#asrock03:
# bmc_user: include-rel://secrets/cimc-user
# bmc_password: include-rel://secrets/cimc-password
# power_type: manual
# bmc_address: 10.2.69.19
# bmc_power_boot_type: efi
# zone: asrock03
as1-maas-node-01:
power_type: virsh
zone: asrock03
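as1-maas-node-01 uses the virsh power type, so MAAS drives it over libvirt. A quick reachability check from the MAAS host (the URI and user are assumptions based on the asrock host entries above):
virsh -c qemu+ssh://ubuntu@10.0.1.243/system list --all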

config/openstack.yaml Normal file
@@ -0,0 +1,21 @@
tenant_name: ubuntu-net
tenant_subnet_name: ubuntu-subnet
tenant_cidr: 172.16.0.0/24
tenant_gateway: 172.16.0.1
tenant_start: 172.16.0.10
tenant_end: 172.16.0.254
ext_name: ext-net
ext_subnet_name: ext-subnet
ext_cidr: 10.0.0.0/24
ext_gateway: 10.0.0.1
ext_start: 10.0.0.2
ext_end: 10.0.0.254
router_name: ubuntu-router
network_type: vlan
segmentation_id: 1001
physnet: physnet1
keypair_file: /home/arif/.ssh/id_rsa.pub
keypair_name: ubuntu-keypair
image_series: bionic
kvm_image_name: bionic-kvm
lxd_image_name: bionic-lxd
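These values drive the post-deploy tenant/external network setup; the rough manual OpenStack CLI equivalent for the external side would be:
#!/bin/bash
openstack network create --external --provider-network-type vlan \
    --provider-physical-network physnet1 --provider-segment 1001 ext-net
openstack subnet create --network ext-net --subnet-range 10.0.0.0/24 \
    --gateway 10.0.0.1 --allocation-pool start=10.0.0.2,end=10.0.0.254 ext-subnet
openstack router create ubuntu-router
openstack router set --external-gateway ext-net ubuntu-router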

@@ -0,0 +1,264 @@
---
variables:
oam-space: &oam-space oam
public-space: &public-space oam
docker-registry: &docker-registry hub.juniper.net/contrail
docker-user: &docker-user include-file://../../secrets/juniper-username.txt
docker-password: &docker-password include-file://../../secrets/juniper-password.txt
#docker_runtime_repo: &docker-repo http://repo1.nci.bt.com/wes-505/current/mirror/download.docker.com/linux/ubuntu/
#docker_runtime_key_url: &docker-key http://repo1.nci.bt.com/wes-505/current/mirror/download.docker.com/linux/ubuntu/gpg
#image-tag: &image-tag 5.1.0-0.38-queens
#adastral image-tag: &image-tag 19.30-queens
# contrail version
image-tag: &image-tag "2011.138" # New LTS release; previously "1912.L1.46"
#contrail-proxy: &contrail-proxy ""
#contrail-no-proxy: &contrail-no-proxy ""
# In old charms the network was also used for API calls (contrail controller and API);
# in the latest charms there is a separate API network, so this could be split.
# contrail-control-net: &contrail-control-net 172.16.4.0/22
# #contrail net now on OAM
contrail-control-net: &contrail-control-net 10.0.1.0/24
# Data net on SDN transport
contrail-data-net: &contrail-data-net 10.0.6.0/24
# api in external
contrail-api-vip: &contrail-api-vip 10.0.1.221
# contrail log level (SYS_NOTICE or SYS_DEBUG)
contrail-log-level: &contrail-log-level SYS_DEBUG
applications:
# Contrail applications
contrail-openstack:
charm: cs:~juniper-os-software/contrail-openstack
options:
docker-registry: *docker-registry
docker-user: *docker-user
docker-password: *docker-password
#docker_runtime_repo: *docker-repo
#docker_runtime_key_url: *docker-key
#temp change to apt until bug fixed around no_proxy
#https://github.com/Juniper/contrail-charms/issues/150
#docker_runtime: apt
#docker_runtime: custom
image-tag: *image-tag
#https_proxy: *contrail-proxy
#http_proxy: *contrail-proxy
#no_proxy: *contrail-no-proxy
contrail-agent:
charm: cs:~juniper-os-software/contrail-agent
options:
log-level: *contrail-log-level
docker-registry: *docker-registry
docker-user: *docker-user
docker-password: *docker-password
#docker_runtime_repo: *docker-repo
#docker_runtime_key_url: *docker-key
#temp change to apt until bug fixed around no_proxy
#https://github.com/Juniper/contrail-charms/issues/150
#docker_runtime: apt
#docker_runtime: custom
image-tag: *image-tag
#https_proxy: *contrail-proxy
#http_proxy: *contrail-proxy
#no_proxy: *contrail-no-proxy
#physical-interface: ens8
#vhost-gateway: auto
#sriov-physical-interface: enp94s0f0
#sriov-numvfs: "12"
contrail-analytics:
charm: cs:~juniper-os-software/contrail-analytics
num_units: 3
bindings:
"": *oam-space
expose: true
options:
log-level: *contrail-log-level
docker-registry: *docker-registry
docker-user: *docker-user
docker-password: *docker-password
#docker_runtime_repo: *docker-repo
#docker_runtime_key_url: *docker-key
#temp change to apt until bug fixed around no_proxy
#https://github.com/Juniper/contrail-charms/issues/150
#docker_runtime: apt
#docker_runtime: custom
image-tag: *image-tag
#https_proxy: *contrail-proxy
#http_proxy: *contrail-proxy
#no_proxy: *contrail-no-proxy
control-network: *contrail-control-net
haproxy-http-mode: "http"
min-cluster-size: 3
# added to try to resolve issue with contrail-haproxy IJ Juniper case: 2020-0708-0220
vip: *contrail-api-vip
to:
- 500
- 501
- 502
contrail-analytics-db:
charm: cs:~juniper-os-software/contrail-analyticsdb
num_units: 3
bindings:
"": *oam-space
expose: true
options:
log-level: *contrail-log-level
docker-registry: *docker-registry
docker-user: *docker-user
docker-password: *docker-password
#docker_runtime_repo: *docker-repo
#docker_runtime_key_url: *docker-key
#temp change to apt until bug fixed around no_proxy
#https://github.com/Juniper/contrail-charms/issues/150
#docker_runtime: apt
#docker_runtime: custom
image-tag: *image-tag
#https_proxy: *contrail-proxy
#http_proxy: *contrail-proxy
#no_proxy: *contrail-no-proxy
control-network: *contrail-control-net
cassandra-minimum-diskgb: "4"
cassandra-jvm-extra-opts: "-Xms2g -Xmx4g"
min-cluster-size: 3
to:
- 503
- 504
- 505
keepalived:
charm: cs:~containers/keepalived
options:
virtual_ip: *contrail-api-vip
port: 8143
contrail-haproxy:
charm: cs:haproxy
num_units: 3
bindings:
"": *oam-space
#changed: reverseproxy: *overlay-space
reverseproxy: *oam-space
website: *public-space
public: *public-space
options:
default_timeouts: >-
queue 60000, connect 5000, client 120000, server 120000
services: ""
source: backports
peering_mode: "active-active"
enable_monitoring: True
ssl_cert: SELFSIGNED
to:
- lxd:500
- lxd:501
- lxd:502
contrail-controller:
charm: cs:~juniper-os-software/contrail-controller
num_units: 3
bindings:
"": *oam-space
expose: true
options:
log-level: *contrail-log-level
docker-registry: *docker-registry
docker-user: *docker-user
docker-password: *docker-password
#docker_runtime_repo: *docker-repo
#docker_runtime_key_url: *docker-key
#temp change to apt until bug fixed around no_proxy
#https://github.com/Juniper/contrail-charms/issues/150
#docker_runtime: apt
#docker_runtime: custom
image-tag: *image-tag
#https_proxy: *contrail-proxy
#http_proxy: *contrail-proxy
#no_proxy: *contrail-no-proxy
control-network: *contrail-control-net
#new data network as we are now splitting above function
#data-network: *contrail-data-net
auth-mode: rbac
cassandra-minimum-diskgb: "4"
cassandra-jvm-extra-opts: "-Xms1g -Xmx2g"
vip: *contrail-api-vip
#local-rabbitmq-hostname-resolution: True
haproxy-https-mode: tcp
haproxy-http-mode: http
bgp-asn: '65000'
min-cluster-size: 3
to:
- 506
- 507
- 508
contrail-keystone-auth:
charm: cs:~juniper-os-software/contrail-keystone-auth
num_units: 3
bindings:
"": *oam-space
to:
- lxd:503
- lxd:504
- lxd:505
relations:
- ["contrail-keystone-auth:identity-admin", "keystone:identity-admin"]
- ["contrail-controller:contrail-auth", "contrail-keystone-auth:contrail-auth"]
- ["contrail-controller:contrail-analytics", "contrail-analytics:contrail-analytics"]
- ["contrail-controller:contrail-analyticsdb", "contrail-analytics-db:contrail-analyticsdb"]
- ["contrail-controller", "ntp"]
- ["contrail-analytics:contrail-analyticsdb", "contrail-analytics-db:contrail-analyticsdb"]
- ["contrail-analytics", "ntp"]
- ["contrail-analytics-db", "ntp"]
- ["contrail-openstack:nova-compute", "nova-compute:neutron-plugin"]
- ["contrail-openstack:neutron-api", "neutron-api:neutron-plugin-api-subordinate"]
- ["contrail-openstack:heat-plugin", "heat:heat-plugin-subordinate"]
- ["contrail-openstack:contrail-controller", "contrail-controller:contrail-controller"]
- ["contrail-agent:juju-info", "nova-compute:juju-info"]
- ["contrail-agent:contrail-controller", "contrail-controller:contrail-controller"]
- ["contrail-analytics:http-services", "contrail-haproxy:reverseproxy"]
- ["contrail-controller:http-services", "contrail-haproxy:reverseproxy"]
- ["contrail-controller:https-services", "contrail-haproxy:reverseproxy"]
- ["contrail-haproxy:juju-info", "keepalived:juju-info"]
# added to support SSL on API
#- [ "contrail-agent:tls-certificates", "easyrsa:client" ]
#- [ "contrail-agent-dpdk:tls-certificates", "easyrsa:client" ]
#- [ "contrail-controller:tls-certificates", "easyrsa:client" ]
#- [ "contrail-analytics:tls-certificates", "easyrsa:client" ]
#- [ "contrail-analytics-db:tls-certificates", "easyrsa:client" ]
- [ "contrail-controller:nrpe-external-master", "nrpe-container:nrpe-external-master" ]
- [ "contrail-controller:juju-info", "telegraf:juju-info" ]
- [ "contrail-controller:juju-info", "filebeat:beats-host" ]
- [ "contrail-controller:juju-info", "landscape-client:container" ]
- [ "contrail-analytics:juju-info", "telegraf:juju-info" ]
- [ "contrail-analytics:juju-info", "filebeat:beats-host" ]
- [ "contrail-analytics:juju-info", "landscape-client:container" ]
- [ "contrail-analytics:nrpe-external-master", "nrpe-container:nrpe-external-master" ]
- [ "contrail-analytics-db:juju-info", "telegraf:juju-info" ]
- [ "contrail-analytics-db:juju-info", "filebeat:beats-host" ]
- [ "contrail-analytics-db:juju-info", "landscape-client:container" ]
- [ "contrail-analytics-db:nrpe-external-master", "nrpe-container:nrpe-external-master" ]
- [ "contrail-haproxy:juju-info", "telegraf:juju-info" ]
- [ "contrail-haproxy:juju-info", "filebeat:beats-host" ]
- [ "contrail-haproxy:juju-info", "landscape-client:container" ]
- [ "contrail-haproxy:nrpe-external-master", "nrpe-container:nrpe-external-master" ]
- [ "contrail-keystone-auth:juju-info", "telegraf:juju-info" ]
- [ "contrail-keystone-auth:juju-info", "filebeat:beats-host" ]
- [ "contrail-keystone-auth:juju-info", "landscape-client:container" ]
- [ "contrail-keystone-auth:nrpe-external-master", "nrpe-container:nrpe-external-master" ]

@@ -0,0 +1,21 @@
applications:
contrail-agent:
charm: cs:~juniper-os-software/contrail-agent-22
contrail-agent-dpdk:
charm: cs:~juniper-os-software/contrail-agent-22
contrail-analytics:
charm: cs:~juniper-os-software/contrail-analytics-20
contrail-analytics-db:
charm: cs:~juniper-os-software/contrail-analyticsdb-20
contrail-controller:
charm: cs:~juniper-os-software/contrail-controller-21
contrail-haproxy:
charm: cs:haproxy-55
contrail-keystone-auth:
charm: cs:~juniper-os-software/contrail-keystone-auth-20
contrail-openstack:
charm: cs:~juniper-os-software/contrail-openstack-23
juniper-server:
charm: cs:ubuntu-15
keepalived:
charm: cs:~containers/keepalived-28

@@ -0,0 +1,59 @@
---
applications:
aodh:
options:
os-public-hostname: aodh.example.com
os-internal-hostname: &aodh-int aodh-internal.example.com
os-admin-hostname: *aodh-int
ceilometer:
options:
os-public-hostname: ceilometer.example.com
os-internal-hostname: &ceilometer-int ceilometer-internal.example.com
os-admin-hostname: *ceilometer-int
cinder:
options:
os-public-hostname: cinder.example.com
os-internal-hostname: &cinder-int cinder-internal.example.com
os-admin-hostname: *cinder-int
openstack-dashboard:
options:
os-public-hostname: dashboard.example.com
glance:
options:
os-public-hostname: glance.example.com
os-internal-hostname: &glance-int glance-internal.example.com
os-admin-hostname: *glance-int
gnocchi:
options:
os-public-hostname: gnocchi.example.com
os-internal-hostname: &gnocchi-int gnocchi-internal.example.com
os-admin-hostname: *gnocchi-int
heat:
options:
os-public-hostname: heat.example.com
os-internal-hostname: &heat-int heat-internal.example.com
os-admin-hostname: *heat-int
keystone:
options:
os-public-hostname: keystone.example.com
os-internal-hostname: &keystone-int keystone-internal.example.com
os-admin-hostname: *keystone-int
neutron-api:
options:
os-public-hostname: neutron.example.com
os-internal-hostname: &neutron-int neutron-internal.example.com
os-admin-hostname: *neutron-int
nova-cloud-controller:
options:
os-public-hostname: nova.example.com
os-internal-hostname: &nova-int nova-internal.example.com
os-admin-hostname: *nova-int
# ceph-radosgw:
# options:
# os-public-hostname: swift.example.com
# os-internal-hostname: &swift-int swift-internal.example.com
# os-admin-hostname: *swift-int
vault:
options:
hostname: vault-internal.example.com
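Once deployed, the hostnames above should appear in the service catalog; a spot check via the OpenStack CLI:
openstack endpoint list --service identity   # expect keystone.example.com / keystone-internal.example.com URLs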

config/overlays/ldap.yaml Normal file
@@ -0,0 +1,106 @@
---
applications:
ldap-domain1:
charm: cs:~openstack-charmers/ldap-test-fixture
bindings:
"": oam
num_units: 1
to:
- lxd:100
ldap-domain2:
charm: cs:~openstack-charmers/ldap-test-fixture
num_units: 1
bindings:
"": oam
to:
- lxd:101
ldap-domain3:
charm: cs:~openstack-charmers/ldap-test-fixture
num_units: 1
bindings:
"": oam
to:
- lxd:102
keystone-ldap-domain1:
charm: cs:keystone-ldap
options:
ldap-user: cn=admin,dc=test,dc=com
ldap-password: crapper
ldap-suffix: dc=test,dc=com
domain-name: domain1
ldap-config-flags: >-
{
use_pool: true,
pool_size: 10,
pool_retry_max: 1,
user_tree_dn: "ou=users,dc=test,dc=com",
user_objectclass: "posixAccount",
user_id_attribute: uid,
user_name_attribute: uid,
user_attribute_ignore: userPassword,
query_scope: sub,
group_name_attribute: cn,
group_member_attribute: memberUid,
group_desc_attribute: description,
group_tree_dn: "ou=groups,dc=test,dc=com",
group_id_attribute: cn,
group_objectclass: "posixGroup",
group_members_are_ids: true,
}
keystone-ldap-domain2:
charm: cs:keystone-ldap
options:
ldap-user: cn=admin,dc=test,dc=com
ldap-password: crapper
ldap-suffix: dc=test,dc=com
domain-name: domain2
ldap-config-flags: >-
{
use_pool: true,
pool_size: 10,
pool_retry_max: 1,
user_tree_dn: "ou=users,dc=test,dc=com",
user_objectclass: "posixAccount",
user_id_attribute: uid,
user_name_attribute: uid,
user_attribute_ignore: userPassword,
query_scope: sub,
group_name_attribute: cn,
group_member_attribute: member,
group_desc_attribute: description,
group_tree_dn: "ou=groups,dc=test,dc=com",
group_id_attribute: cn,
group_objectclass: "groupOfNames",
group_members_are_ids: false,
}
keystone-ldap-domain3:
charm: cs:keystone-ldap
options:
ldap-user: cn=admin,dc=test,dc=com
ldap-password: crapper
ldap-suffix: dc=test,dc=com
domain-name: domain3
ldap-config-flags: >-
{
use_pool: true,
pool_size: 10,
pool_retry_max: 1,
user_tree_dn: "ou=users,dc=test,dc=com",
user_objectclass: "posixAccount",
user_id_attribute: uid,
user_name_attribute: uid,
user_attribute_ignore: userPassword,
query_scope: sub,
group_name_attribute: cn,
group_member_attribute: uniqueMember,
group_desc_attribute: description,
group_tree_dn: "ou=groups,dc=test,dc=com",
group_id_attribute: cn,
group_objectclass: "groupOfUniqueNames",
}
relations:
- [ "keystone", "keystone-ldap-domain1" ]
- [ "keystone", "keystone-ldap-domain2" ]
- [ "keystone", "keystone-ldap-domain3" ]

@@ -0,0 +1,149 @@
applications:
aodh:
charm: cs:aodh-35
# bcache-tuning:
# charm: cs:bcache-tuning-2
# canonical-livepatch:
# charm: cs:canonical-livepatch-34
ceilometer:
charm: cs:ceilometer-268
ceilometer-agent:
charm: cs:ceilometer-agent-258
ceph-mon:
charm: cs:ceph-mon-44
ceph-osd:
charm: cs:ceph-osd-294
# ceph-osd2:
# charm: cs:ceph-osd-294
# ceph-radosgw:
# charm: cs:ceph-radosgw-283
cinder:
charm: cs:cinder-297
cinder2:
charm: cs:cinder-297
cinder-ceph:
charm: cs:cinder-ceph-251
easyrsa:
charm: cs:~containers/easyrsa-296
elasticsearch:
charm: cs:elasticsearch-39
etcd:
charm: cs:etcd-488
# external-policy-routing:
# charm: cs:~canonical-bootstack/policy-routing-3
filebeat:
charm: cs:filebeat-29
glance:
charm: cs:glance-292
gnocchi:
charm: cs:gnocchi-32
grafana:
charm: cs:~prometheus-charmers/grafana-38
graylog:
charm: cs:graylog-49
graylog-mongodb:
charm: cs:mongodb-53
hacluster-aodh:
charm: cs:hacluster-63
hacluster-cinder:
charm: cs:hacluster-63
hacluster-cinder2:
charm: cs:hacluster-63
hacluster-glance:
charm: cs:hacluster-63
hacluster-gnocchi:
charm: cs:hacluster-63
hacluster-heat:
charm: cs:hacluster-63
hacluster-horizon:
charm: cs:hacluster-63
hacluster-keystone:
charm: cs:hacluster-63
hacluster-mysql:
charm: cs:hacluster-63
hacluster-neutron:
charm: cs:hacluster-63
hacluster-nova:
charm: cs:hacluster-63
# hacluster-radosgw:
# charm: cs:hacluster-63
hacluster-vault:
charm: cs:hacluster-63
heat:
charm: cs:heat-271
keystone-ldap-domain1:
charm: cs:keystone-ldap-23
keystone-ldap-domain2:
charm: cs:keystone-ldap-23
keystone-ldap-domain3:
charm: cs:keystone-ldap-23
juniper-server:
charm: cs:ubuntu-15
keystone:
charm: cs:keystone-309
landscape-client:
charm: cs:landscape-client-32
landscape-haproxy:
charm: cs:haproxy-55
landscape-postgresql:
charm: cs:postgresql-199
landscape-rabbitmq-server:
charm: cs:rabbitmq-server-97
landscape-server:
charm: cs:landscape-server-38
# lldpd:
# charm: cs:lldpd-0
memcached:
charm: cs:memcached-26
mysql:
charm: cs:percona-cluster-282
nagios:
charm: cs:nagios-35
# ncitest-ldap:
# charm: cs:keystone-ldap-23
neutron-gateway:
charm: cs:neutron-gateway-276
neutron-openvswitch:
charm: cs:neutron-openvswitch-269
neutron-api:
charm: cs:neutron-api-282
nova-cloud-controller:
charm: cs:nova-cloud-controller-340
nova-compute:
charm: cs:nova-compute-311
# nrpe-compute:
# charm: cs:nrpe-61
nrpe-container:
charm: cs:nrpe-61
nrpe-host:
charm: cs:nrpe-61
# nrpe-kvm:
# charm: cs:nrpe-61
ntp:
charm: cs:ntp-37
openstack-dashboard:
charm: cs:openstack-dashboard-297
openstack-service-checks:
charm: cs:~canonical-bootstack/openstack-service-checks-30
prometheus:
charm: cs:prometheus2-12
prometheus-ceph-exporter:
charm: cs:prometheus-ceph-exporter-5
prometheus-openstack-exporter:
charm: cs:prometheus-openstack-exporter-10
rabbitmq-server:
charm: cs:rabbitmq-server-97
sysconfig-compute:
charm: cs:sysconfig-2
# sysconfig-storage:
# charm: cs:sysconfig-2
sysconfig-control:
charm: cs:sysconfig-2
telegraf:
charm: cs:telegraf-30
telegraf-prometheus:
charm: cs:telegraf-30
# thruk-agent:
# charm: cs:thruk-agent-2
vault:
charm: cs:vault-32

@@ -0,0 +1,221 @@
applications:
aodh:
charm: cs:aodh-48
appformix-advanced-routing:
charm: cs:advanced-routing-5
ccom-server:
charm: cs:ubuntu-18
appformix-server:
charm: cs:ubuntu-18
bcache-tuning:
charm: cs:bcache-tuning-6
canonical-livepatch:
charm: cs:canonical-livepatch-42
ceilometer:
charm: cs:ceilometer-282
ceilometer-agent:
charm: cs:ceilometer-agent-271
ceph-mon:
charm: cs:ceph-mon-55
ceph-mon2:
charm: cs:ceph-mon-55
ceph-osd:
charm: cs:ceph-osd-310
ceph-osd2:
charm: cs:ceph-osd-310
ceph-radosgw:
charm: cs:ceph-radosgw-296
cinder:
charm: cs:cinder-310
cinder-ceph:
charm: cs:cinder-ceph-262 # upgrade to support availability-zone specification
cinder-ceph2:
charm: cs:cinder-ceph-262 # upgrade to support availability-zone specification
cinder-infinidat:
charm: cs:~bt-charmers/cinder-infinidat-19
contrail-agent:
charm: cs:~juniper-os-software/contrail-agent-27
contrail-agent-dpdk:
charm: cs:~juniper-os-software/contrail-agent-27
contrail-analytics:
charm: cs:~juniper-os-software/contrail-analytics-25
contrail-analytics-db:
charm: cs:~juniper-os-software/contrail-analyticsdb-25
contrail-controller:
charm: cs:~juniper-os-software/contrail-controller-27
contrail-haproxy:
charm: cs:haproxy-61
contrail-keystone-auth:
charm: cs:~juniper-os-software/contrail-keystone-auth-26
contrail-openstack:
charm: cs:~juniper-os-software/contrail-openstack-28
controller-server:
charm: cs:ubuntu-18
easyrsa:
charm: cs:~containers/easyrsa-408
elasticsearch:
charm: cs:elasticsearch-49
etcd:
charm: cs:etcd-583
external-advanced-routing:
charm: cs:advanced-routing-5
filebeat:
charm: cs:filebeat-33
glance:
charm: cs:glance-305
gnocchi:
charm: cs:gnocchi-46
grafana:
charm: cs:grafana-49
graylog:
charm: cs:graylog-47
graylog-mongodb:
charm: cs:mongodb-59
hacluster-aodh:
charm: cs:hacluster-76
hacluster-cinder:
charm: cs:hacluster-76
hacluster-glance:
charm: cs:hacluster-76
hacluster-gnocchi:
charm: cs:hacluster-76
hacluster-heat:
charm: cs:hacluster-76
hacluster-horizon:
charm: cs:hacluster-76
hacluster-placement:
charm: cs:hacluster-76
hacluster-keystone:
charm: cs:hacluster-76
hacluster-manila:
charm: cs:hacluster-76
hacluster-neutron:
charm: cs:hacluster-76
hacluster-nova:
charm: cs:hacluster-76
hacluster-radosgw:
charm: cs:hacluster-76
hacluster-vault:
charm: cs:hacluster-76
heat:
charm: cs:heat-283
infinidat-tools:
charm: cs:~bt-charmers/infinidat-tools-8
juniper-server:
charm: cs:ubuntu-18
keepalived:
charm: cs:~containers/keepalived-98
keystone:
charm: cs:keystone-323
landscape-client:
charm: cs:landscape-client-35
landscape-haproxy:
charm: cs:haproxy-61
landscape-postgresql:
charm: cs:postgresql-233
landscape-rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server-438 # attempted fix for LP#1939702
landscape-server:
charm: cs:landscape-server-39
lldpd:
charm: cs:lldpd-9
manila:
charm: cs:manila-30
manila-dashboard:
charm: cs:~openstack-charmers/manila-dashboard-2
manila-infinidat:
charm: cs:~bt-charmers/manila-infinidat-14
memcached:
charm: cs:memcached-32
mysql-innodb-cluster:
charm: cs:mysql-innodb-cluster-11
aodh-mysql-router:
charm: cs:mysql-router-11
keystone-mysql-router:
charm: cs:mysql-router-11
cinder-mysql-router:
charm: cs:mysql-router-11
glance-mysql-router:
charm: cs:mysql-router-11
gnocchi-mysql-router:
charm: cs:mysql-router-11
heat-mysql-router:
charm: cs:mysql-router-11
nova-cloud-controller-mysql-router:
charm: cs:mysql-router-11
neutron-api-mysql-router:
charm: cs:mysql-router-11
openstack-dashboard-mysql-router:
charm: cs:mysql-router-11
placement-mysql-router:
charm: cs:mysql-router-11
vault-mysql-router:
charm: cs:mysql-router-11
manila-mysql-router:
charm: cs:mysql-router-11
nagios:
charm: cs:nagios-44
inf-ldap:
charm: cs:keystone-ldap-35
int-ldap:
charm: cs:keystone-ldap-35
neutron-api:
charm: cs:neutron-api-294
placement:
charm: cs:placement-19
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller-549 # bug LP#1928992 Placement endpoints not being updated, or perhaps regressing to n-c-c endpoints, even after deploying placement service for train
nova-compute-kvm:
charm: cs:nova-compute-327
nova-compute-kvm-dpdk:
charm: cs:nova-compute-327
nrpe-compute-kvm:
charm: cs:nrpe-73
nrpe-compute-kvm-dpdk:
charm: cs:nrpe-73
nrpe-container:
charm: cs:nrpe-73
#nrpe-contrail:
# charm: cs:nrpe-61
nrpe-host:
charm: cs:nrpe-73
nrpe-host-ceph-osd:
charm: cs:nrpe-73
nrpe-kvm:
charm: cs:nrpe-73
nrpe-kvm-appformix:
charm: cs:nrpe-73
ntp:
charm: cs:ntp-46
openstack-dashboard:
charm: cs:openstack-dashboard-313
openstack-service-checks:
#charm: cs:openstack-service-checks-4 # this breaks SSL because of certifi bug 1924816
charm: cs:~llama-charmers-next/openstack-service-checks-12
prometheus:
charm: cs:prometheus2-22
prometheus-ceph-exporter:
charm: cs:prometheus-ceph-exporter-13
prometheus-openstack-exporter:
charm: cs:prometheus-openstack-exporter-17
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server-438 # attempted fix for LP#1939702
#charm: cs:rabbitmq-server-110
sysconfig-compute:
charm: cs:sysconfig-2
sysconfig-compute-dpdk:
charm: cs:sysconfig-2
sysconfig-control:
charm: cs:sysconfig-2
sysconfig-storage:
charm: cs:sysconfig-2
telegraf:
charm: cs:telegraf-41
telegraf-appformix:
charm: cs:telegraf-41
telegraf-prometheus:
charm: cs:telegraf-41
thruk-agent:
charm: cs:thruk-agent-10
vault:
charm: cs:vault-46
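Revision pins like these are typically captured from a known-good model; a hedged way to regenerate such an overlay after charm upgrades:
juju export-bundle | grep 'charm: cs:' | sort -u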

@@ -0,0 +1,141 @@
applications:
aodh:
charm: cs:aodh-27
# bcache-tuning:
# charm: cs:~james-page/bcache-tuning-10
# canonical-livepatch:
# charm: cs:canonical-livepatch-32
ceilometer:
charm: cs:ceilometer-262
ceilometer-agent:
charm: cs:ceilometer-agent-252
ceph-mon:
charm: cs:ceph-mon-38
ceph-osd:
charm: cs:ceph-osd-285
# ceph-radosgw:
# charm: cs:ceph-radosgw-271
cinder:
charm: cs:cinder-297
cinder-ceph:
charm: cs:cinder-ceph-243
easyrsa:
charm: cs:~containers/easyrsa-254
elasticsearch:
charm: cs:elasticsearch-37
etcd:
charm: cs:etcd-434
# external-policy-routing:
# charm: cs:~canonical-bootstack/policy-routing-3
filebeat:
charm: cs:filebeat-24
glance:
charm: cs:glance-290
gnocchi:
charm: cs:gnocchi-23
grafana:
charm: cs:~prometheus-charmers/grafana-33
graylog:
charm: cs:graylog-32
graylog-mongodb:
charm: cs:mongodb-52
hacluster-aodh:
charm: cs:hacluster-55
hacluster-cinder:
charm: cs:hacluster-55
hacluster-glance:
charm: cs:hacluster-55
hacluster-gnocchi:
charm: cs:hacluster-55
hacluster-heat:
charm: cs:hacluster-55
hacluster-horizon:
charm: cs:hacluster-55
hacluster-keystone:
charm: cs:hacluster-55
hacluster-mysql:
charm: cs:hacluster-55
hacluster-neutron:
charm: cs:hacluster-55
hacluster-nova:
charm: cs:hacluster-55
# hacluster-radosgw:
# charm: cs:hacluster-55
hacluster-vault:
charm: cs:hacluster-55
heat:
charm: cs:heat-263
keepalived:
charm: cs:~containers/keepalived-28
keystone-ldap-domain1:
charm: cs:keystone-ldap-18
keystone-ldap-domain2:
charm: cs:keystone-ldap-18
keystone-ldap-domain3:
charm: cs:keystone-ldap-18
juniper-server:
charm: cs:ubuntu-15
keystone:
charm: cs:keystone-309
landscape-client:
charm: cs:landscape-client-32
landscape-haproxy:
charm: cs:haproxy-55
landscape-postgresql:
charm: cs:postgresql-199
landscape-rabbitmq-server:
charm: cs:rabbitmq-server-89
landscape-server:
charm: cs:landscape-server-33
# lldpd:
# charm: cs:~ivoks/lldpd-5
memcached:
charm: cs:memcached-23
mysql:
charm: cs:percona-cluster-276
nagios:
charm: cs:nagios-33
neutron-gateway:
charm: cs:neutron-gateway-276
neutron-openvswitch:
charm: cs:neutron-openvswitch-269
neutron-api:
charm: cs:neutron-api-281
nova-cloud-controller:
charm: cs:nova-cloud-controller-339
nova-compute:
charm: cs:nova-compute-302
# nrpe-compute:
# charm: cs:nrpe-58
nrpe-container:
charm: cs:nrpe-58
nrpe-host:
charm: cs:nrpe-58
# nrpe-kvm:
# charm: cs:nrpe-58
ntp:
charm: cs:ntp-32
openstack-dashboard:
charm: cs:openstack-dashboard-288
openstack-service-checks:
charm: cs:~canonical-bootstack/openstack-service-checks-22
prometheus:
charm: cs:prometheus2-10
prometheus-ceph-exporter:
charm: cs:prometheus-ceph-exporter-5
prometheus-openstack-exporter:
charm: cs:prometheus-openstack-exporter-10
rabbitmq-server:
charm: cs:rabbitmq-server-89
sysconfig-compute:
charm: cs:sysconfig-1
sysconfig-control:
charm: cs:sysconfig-1
telegraf:
charm: cs:telegraf-29
telegraf-prometheus:
charm: cs:telegraf-29
# thruk-agent:
# charm: cs:thruk-agent-2
vault:
charm: cs:vault-24

config/overlays/ovs.yaml Normal file
@@ -0,0 +1,22 @@
---
variables:
nova-default-filters: &nova-default-filters >-
RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFilter,
ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,
ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,DifferentHostFilter,
SameHostFilter,AggregateInstanceExtraSpecsFilter,NUMATopologyFilter,
AggregateCoreFilter,DiskFilter
vlan-ranges: &vlan-ranges physnet1:350:599
applications:
nova-cloud-controller:
options:
scheduler-default-filters: *nova-default-filters
nova-compute:
options:
# AppArmor needs to be disabled: LP:1820302
aa-profile-mode: disable
neutron-api:
options:
vlan-ranges: *vlan-ranges
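A quick post-deploy check that both overridden options took effect:
juju config neutron-api vlan-ranges                           # expect physnet1:350:599
juju config nova-cloud-controller scheduler-default-filters   # expect the filter list above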

@@ -0,0 +1,7 @@
applications:
keystone:
options:
use-policyd-override: true
resources:
policyd-override: ../resources/keystone.zip
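The policyd-override resource is a zip of policy snippets. It can also be rebuilt and re-attached on a live model (the policy-overrides/ directory is an assumption for illustration):
#!/bin/bash
zip -j keystone.zip policy-overrides/*.yaml       # assumed local policy snippets
juju attach-resource keystone policyd-override=./keystone.zip
juju config keystone use-policyd-override=true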

config/overlays/ssl.yaml Normal file
@@ -0,0 +1,124 @@
---
variables:
ssl_ca: &ssl_ca >-
include-base64://../ssl/cacert.pem
ssl_crt: &ssl_crt >-
include-base64://../ssl/servercert.pem
ssl_key: &ssl_key >-
include-base64://../ssl/serverkey.pem
applications:
aodh:
options:
ssl_ca: *ssl_ca
ssl_cert: *ssl_crt
ssl_key: *ssl_key
ceilometer:
options:
ssl_ca: *ssl_ca
cinder:
options:
ssl_ca: *ssl_ca
ssl_cert: *ssl_crt
ssl_key: *ssl_key
openstack-dashboard:
options:
ssl_ca: *ssl_ca
ssl_cert: *ssl_crt
ssl_key: *ssl_key
glance:
options:
ssl_ca: *ssl_ca
ssl_cert: *ssl_crt
ssl_key: *ssl_key
gnocchi:
options:
ssl_ca: *ssl_ca
ssl_cert: *ssl_crt
ssl_key: *ssl_key
heat:
options:
ssl_ca: *ssl_ca
ssl_cert: *ssl_crt
ssl_key: *ssl_key
keystone:
options:
ssl_ca: *ssl_ca
ssl_cert: *ssl_crt
ssl_key: *ssl_key
neutron-api:
options:
ssl_ca: *ssl_ca
ssl_cert: *ssl_crt
ssl_key: *ssl_key
nova-cloud-controller:
options:
ssl_ca: *ssl_ca
ssl_cert: *ssl_crt
ssl_key: *ssl_key
console-ssl-cert: *ssl_crt
console-ssl-key: *ssl_key
# ceph-radosgw:
# options:
# ssl_ca: *ssl_ca
# ssl_cert: *swift_crt
# ssl_key: *swift_key
openstack-service-checks:
options:
trusted_ssl_ca: *ssl_ca
prometheus-openstack-exporter:
options:
ssl_ca: *ssl_ca
# vault:
# options:
# ssl-ca: *ssl_ca
# ssl-cert: *ssl_crt
# ssl-key: *ssl_key
nagios:
options:
ssl_chain: *ssl_ca
ssl_cert: *ssl_crt
ssl_key: *ssl_key
ssl: 'on'
landscape-client:
options:
# This charm expects the value to start with the literal string "base64:"; the rest is identical to the *ssl_ca value.
ssl-public-key: "base64:LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZqekNDQTNlZ0F3SUJBZ0lVWGdQbWZkdktY
YXVNbHNqTjhVeTJuTWdhamF3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd1Z6RUxNQWtHQTFVRUJoTUNS
MEl4RURBT0JnTlZCQWdNQjBWdVoyeGhibVF4RHpBTkJnTlZCQWNNQmt4dgpibVJ2YmpFVk1CTUdB
MVVFQ2d3TVZXSjFiblIxSUVOc2IzVmtNUTR3REFZRFZRUUxEQVZEYkc5MVpEQWVGdzB5Ck1UQXlN
VEV4TWpRNU1URmFGdzB5TWpBeU1URXhNalE1TVRGYU1GY3hDekFKQmdOVkJBWVRBa2RDTVJBd0Rn
WUQKVlFRSURBZEZibWRzWVc1a01ROHdEUVlEVlFRSERBWk1iMjVrYjI0eEZUQVRCZ05WQkFvTURG
VmlkVzUwZFNCRApiRzkxWkRFT01Bd0dBMVVFQ3d3RlEyeHZkV1F3Z2dJaU1BMEdDU3FHU0liM0RR
RUJBUVVBQTRJQ0R3QXdnZ0lLCkFvSUNBUURpV2tiK3luRnBDVXgxakxlaFM0SUl4MDBjTm51OU81
eTRSbExxZFQ1TXltQzZFRVdBK0RvRnA5VEMKRW10R3ViUWdXNklSVHlJRi9hKytZNGFlRG0vU0NW
TEJ1OWZGZSt3WVdEbDU1L1F6SUJVMUhkWEsrdnBIRlB3SQpVQjRKQlZEUi9UWWZmWG5IMmJTVnMx
eE8xOVRwb2krZ0d4OW5UaEpsMVpyN2pNRkpXbmlXUG5FdEVuSG51Z3owCll0VkRUbEp3OHo2ZmVN
ME1GQlprcWZmZmxoTEpYMlA1OURQM2ZXV1lzcEFSM29TemFwb1JsbURQbHZnZDU0T28KZnJubFlK
cTdyWTRVTmdYYXpUNkZPQklHcDVOTVk1S0M1dXBpbVVEdzZlbTZubW10TjVuZUhkdTR3YlBsRVp6
Nwp1a2o1cFFCREcrOHJpazJhSFB3VkFaOXNZaTNJUXdCc0lvL1pwMHpGeGhkbnNXTzltanNXWWpY
RkJaaFg4UzZzCmZZUFZlSXd1VEsrenZMbkxxZmpCUHBKTG11Um5HT0VhT1RhME9UUUtHZHZncDhX
eFJjc2Y2VXVhTFduUWRURkMKNjFzOE1jVG5GbXhhdWg0TmhmVVYvZURjTlRxU29pZHlDM0lUVmdR
eWJyTis4UW52cnlKcjNycjFVcDl1MzZDRwpJN3dqOHY3dE8vU0tId2ZPcHQ2UmhGNFBrWVRyNnQy
TmR3cURsWnBFMFBTOU5Wbk91S2dubzZmVkpKK1dvaEhxCktTM2Vnckw1elNRbTdDZ2JUTEJyR3NS
UnVoNitVRjVhOWtnZGpkVHVTT1BGYlBLa283djE3VndqQ0VYSlZJUlcKSEF6OW1nd0RVTTNjOU9Z
cEZ2TCs0ZUNVbWc3Mm82djh2NUpGd2JDMUNqdkdMZW5ON1FJREFRQUJvMU13VVRBZApCZ05WSFE0
RUZnUVVNWlZ5NndacERTR1dZOTQxcm5CUzdJTzgxMVF3SHdZRFZSMGpCQmd3Rm9BVU1aVnk2d1pw
CkRTR1dZOTQxcm5CUzdJTzgxMVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBC
QVFzRkFBT0MKQWdFQU1TY0xMSFdRd1B3Y3YycTFrREkzdkg4bWNVU1ZFaG50VS9sVzNVeXh3eldi
VzZZVjJ5WFVtUmpKMlU1dApLRzc3RE5hUXF6T09HcEUwSjJuTDdlcHRoUW01SU1pNW5XNFFUa05C
YVVhOS9QQkk1UkxWZGZCRkZFT3NjQ3VtCnFqeEFQdWdYZHFJMWQreEFmZU9SNHcrY1E1UFNvVW1P
b3JXSzVZQ1BQaU1SVVJnVU50MGNZbjFGR0QzN2FIMUUKeng5RnN1U3lGekhBU1hMNCtYSFNYaHNB
MzJnRTA5SHIyUzRlbytSWUIwbnNreHlCWTBibG55c2d4QXFXMkFGSQo2aGhubTE5SldkZUdSUFBB
ZnRlWFRmNzlqcEF2Rmc2YWZmeWtaalZ4NDR1WSt5TCtNUTR3eEx2WHlrU2MxV3lJCkFwaG9HWkRW
WGxJZVorMVRLVnVXdnZPZlBveVJyYVRyeUtkRGtzNThzaWxBeWV4My9XTktidXRyRUN4V2Jvc0sK
LzkzWExRWnlVdmx0b2JncVpEWDF0Rk9NTDN3ZkdCZzl4dTczMTJ3NXhvN2c1WlFnT0lJaERkdzRw
OGlrekJHaQorZXdvNnhIblZEaE9UWGFCcmtuN3Rwbnh0NGRxQUdMdVJTRC9NV1crTTZXZThwNlI5
TkRLWlBwdDdmVVk1NFlvCm9ISzlSYVMyeHZ3UGxWYjNqaHVKbkxDRWQ0eUdBQnEvcDhlWTZVV01P
TXBqbEFzT3FDN2pnSlRCc3hySnQvdnEKK3RYdDBZdVVTUW5seHBNV0hnVVdFcVMxU2t2R09JVjhZ
MTdGQXphVDRDV3VtSEJlUnFpNS83R2hKcUtSa05VZQo4NENDOWJQMkswN3ZkbGp1blNlR1dCbTJW
YXY3STdQQ0RoK0dQVmp2QnE1T2QvUT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="
landscape-haproxy:
options:
ssl_cert: *ssl_crt
ssl_key: *ssl_key
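The long inline blob for landscape-client is just the CA certificate base64-encoded with a base64: prefix; it can be regenerated from the PEM:
printf 'base64:%s\n' "$(base64 -w0 config/ssl/cacert.pem)"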

@@ -0,0 +1,9 @@
---
applications:
neutron-api:
options:
flat-network-providers: physnet1
enable-vlan-trunking: true
keystone:
options:
admin-password: openstack

config/ssl/cacert.pem Normal file
@@ -0,0 +1,32 @@
-----BEGIN CERTIFICATE-----
MIIFjzCCA3egAwIBAgIUXgPmfdvKXauMlsjN8Uy2nMgajawwDQYJKoZIhvcNAQEL
BQAwVzELMAkGA1UEBhMCR0IxEDAOBgNVBAgMB0VuZ2xhbmQxDzANBgNVBAcMBkxv
bmRvbjEVMBMGA1UECgwMVWJ1bnR1IENsb3VkMQ4wDAYDVQQLDAVDbG91ZDAeFw0y
MTAyMTExMjQ5MTFaFw0yMjAyMTExMjQ5MTFaMFcxCzAJBgNVBAYTAkdCMRAwDgYD
VQQIDAdFbmdsYW5kMQ8wDQYDVQQHDAZMb25kb24xFTATBgNVBAoMDFVidW50dSBD
bG91ZDEOMAwGA1UECwwFQ2xvdWQwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
AoICAQDiWkb+ynFpCUx1jLehS4IIx00cNnu9O5y4RlLqdT5MymC6EEWA+DoFp9TC
EmtGubQgW6IRTyIF/a++Y4aeDm/SCVLBu9fFe+wYWDl55/QzIBU1HdXK+vpHFPwI
UB4JBVDR/TYffXnH2bSVs1xO19Tpoi+gGx9nThJl1Zr7jMFJWniWPnEtEnHnugz0
YtVDTlJw8z6feM0MFBZkqffflhLJX2P59DP3fWWYspAR3oSzapoRlmDPlvgd54Oo
frnlYJq7rY4UNgXazT6FOBIGp5NMY5KC5upimUDw6em6nmmtN5neHdu4wbPlEZz7
ukj5pQBDG+8rik2aHPwVAZ9sYi3IQwBsIo/Zp0zFxhdnsWO9mjsWYjXFBZhX8S6s
fYPVeIwuTK+zvLnLqfjBPpJLmuRnGOEaOTa0OTQKGdvgp8WxRcsf6UuaLWnQdTFC
61s8McTnFmxauh4NhfUV/eDcNTqSoidyC3ITVgQybrN+8QnvryJr3rr1Up9u36CG
I7wj8v7tO/SKHwfOpt6RhF4PkYTr6t2NdwqDlZpE0PS9NVnOuKgno6fVJJ+WohHq
KS3egrL5zSQm7CgbTLBrGsRRuh6+UF5a9kgdjdTuSOPFbPKko7v17VwjCEXJVIRW
HAz9mgwDUM3c9OYpFvL+4eCUmg72o6v8v5JFwbC1CjvGLenN7QIDAQABo1MwUTAd
BgNVHQ4EFgQUMZVy6wZpDSGWY941rnBS7IO811QwHwYDVR0jBBgwFoAUMZVy6wZp
DSGWY941rnBS7IO811QwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC
AgEAMScLLHWQwPwcv2q1kDI3vH8mcUSVEhntU/lW3UyxwzWbW6YV2yXUmRjJ2U5t
KG77DNaQqzOOGpE0J2nL7epthQm5IMi5nW4QTkNBaUa9/PBI5RLVdfBFFEOscCum
qjxAPugXdqI1d+xAfeOR4w+cQ5PSoUmOorWK5YCPPiMRURgUNt0cYn1FGD37aH1E
zx9FsuSyFzHASXL4+XHSXhsA32gE09Hr2S4eo+RYB0nskxyBY0blnysgxAqW2AFI
6hhnm19JWdeGRPPAfteXTf79jpAvFg6affykZjVx44uY+yL+MQ4wxLvXykSc1WyI
AphoGZDVXlIeZ+1TKVuWvvOfPoyRraTryKdDks58silAyex3/WNKbutrECxWbosK
/93XLQZyUvltobgqZDX1tFOML3wfGBg9xu7312w5xo7g5ZQgOIIhDdw4p8ikzBGi
+ewo6xHnVDhOTXaBrkn7tpnxt4dqAGLuRSD/MWW+M6We8p6R9NDKZPpt7fUY54Yo
oHK9RaS2xvwPlVb3jhuJnLCEd4yGABq/p8eY6UWMOMpjlAsOqC7jgJTBsxrJt/vq
+tXt0YuUSQnlxpMWHgUWEqS1SkvGOIV8Y17FAzaT4CWumHBeRqi5/7GhJqKRkNUe
84CC9bP2K07vdljunSeGWBm2Vav7I7PCDh+GPVjvBq5Od/Q=
-----END CERTIFICATE-----
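A quick way to inspect this CA (note the one-year validity; it expires 2022-02-11):
openssl x509 -in config/ssl/cacert.pem -noout -subject -issuer -dates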

config/ssl/copycerts.sh Executable file
@@ -0,0 +1,7 @@
#!/bin/bash
set -e
# Copy the test certificates generated by stsstack-bundles into this directory
CERT_DIR=~/stsstack-bundles/openstack/ssl/openstack-ssl-test/results
cp "$CERT_DIR"/servercert.pem .
cp "$CERT_DIR"/serverkey.pem .
cp "$CERT_DIR"/cacert.pem .

config/ssl/privkey.pem Normal file
@@ -0,0 +1,52 @@
-----BEGIN PRIVATE KEY-----
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDYxKy1xtJoFBdL
LwniTgmfE0fhy3W/W07rf3sNSIADIy+yZjAtfCOTEbmUnOWA4iWTAwrnnJo/CcAz
4hKzHS8BG2RFPplKjNz2VxItfIZlO6WnaLJ9JSEhgrB3ipfbL7a0xWvazhsUWYfl
5SLvElYQRx12Q1z6K44P6NKFcQFvbhAL6aeWxeRD4Eg5mDD5nhHS6hYc96zBg0Qh
QW/vzusZCtwSBxujZKYREj8xdFQdjutF+Pp+w+axlR0IgWZ1bVMYyL3Mag7Fy8db
zryBZZgEORg89zThaF0cnrtqQeHA9vbsfxlS55Dqp26m40xU0TcP/7kqO3A0aUOx
bmp5H5qaQwO7uXxS3s4nD8n08ZDWC/M9vAb6gBvEuUzdNR9mWjiegJbjdiRhrkNM
7tzdX5oFG3LjWr5Ic+JPaHH2WKntuKdTTwAZz1Mw6KmX5hjxJuz8xkLUZ9n02C1d
v+HF6q+5OfUmX9vgjDC/7fm0yEVP7aEP7najcBq2QtuEtktFpr19ICVGWXa7NQM8
OLPDsOMqwbw/nCa7QddV6+Jib/j+XYGlXhX2X8Hr4/+/rJO2d0vxCy1GHZmHBDHn
Ip4nu29AsSCfZH27LkAcLXHFYOBFBlRBdJPdzgX4K5f+abzjXTYN6vGVLWNrofez
33aAveDT5Tr4Hu9Foo3AzAuxOY8n6QIDAQABAoICABeFJ5RFsXjXjTt8LB9hBkw7
ZFZMpEcSGjsVRv3G+xTcEYXS7rFQwe9oQfGtIH3ei+MqtlkxNc9XOIKoqgzDQuFk
3nlg9HMRW7xi8Db9Rvve//dtNci8aZ7gNlOSNy3yPGna7fv81xVbLZK1TRgRrkLv
5HN7lbATX8O8KHOQMm6ry5PvU17ZtA3lXRDawZ2kGB0Bh0q3WQhaPN864XFsyzeZ
ZQ2Ttd0Bw78gcrjj/WiDpGWgQVXV3ccw/ch5eeuN5DFRPr7aVyYBGW2NvghwRJFY
vJeI3zUCBPBhDMbXxS4R3fMFS3Miayf66Ne03Ahr9wSGY8oATwlwzoaHC4h+9GBL
eaDkW9pv8WUiNGl/XYF4CgrW5hdOGWn/vt7NzaloO2zfzVhdjjtRVQYB2YO5AMw4
Zr8LozKpjhjDeG/0rCOxnYE6SBNmtOGvGA1X3SOqfNqLwC01kmT7bKJGDvSjXbX0
suxg8HoVfaKJ4jTqU1mcdk/YqTKMvHEAOmJC7QK/xGOWXVJeA+uqgK2UgOZs7pOu
torSJtJWXdBrrx15oXq7axpEYObk06U/4307gpq0hjjnxuHTqv+TWM49VbA4VSHf
8qs4Det2d7/llk80iwyH/WG7KkO/WSFScAtHuSrddD0T8vnNy4mkVwd7LDAYLumc
LghK1R+qhNaxzy7TmATNAoIBAQDwUldJCjWe3tPoTmnC+oYfaN+R7ZPKeE1dyI7q
3E5xdmHksxPInq8HxNAG0kDVewugq7vwD7b1GyxkgMho3Encx2I6f0U6Wl8UmHx9
+IT18upLi9OK/lfhjYWHJU4Xs3wMwmN1UOMOUO0lc2sIAy0QaUmMrf64QiALQkod
DsOFKVPne/MR20Emj6IKVOtD/k105PNj8B9tQv9Aekecslo8kI3njbchVZ59Ws2S
nesZhshKI9M7uK6DuXmlQT5AJq5H6KMQ4uSWRr81qSR2sAKI8MjplVeRoCifmpur
J/FetS+GFTDbIi1bthGinz8jTJSHyKoUQLX/QjcIzLLywMefAoIBAQDm6PbIMDRe
k88CtNTPp0947J4jusjN4IUILiL+P4WSHIW3yEz/1YQCZCjHFQfyUupQ8k10m/sz
p0PKnzoQoeLeYz3OE0GQPuYPw+f3CBEyAI0TVMuXSHV6Lyx5WVVcK7+FFHtsi9E/
iA2NtXTApRFpHmtfn0VtU0ALMi17tMMMVVz/v3B2yzBiUgv47amFzmn63BELsYh0
j3xzxDl4jmCPLnKrzpUUNgLAp7ag5WgJPlJVsN2arOyT5aHdjxzjvpMFJ0+2vtH4
vGatCgNYfC6m77P/BjIUnxl4t1/x3dNDZ5z5ZbT4h3bxmEslIN4oK0b+avrQM7WP
MEcsn8agtIN3AoIBACGVAAl+2btXm/kMbA8I/xEIkKVNs6dJZFZEnmQHYRbN2iQh
OhSAyCinwgISA2lRhnBvTxXevExH/c4ViOvOTKwDWKCgBEaBNmQkYtEfl0TfKwpf
gbIclJ86LXyTI/R/6kfGKivMn83yVRE0rbIy72spq1NzRPLumpe4La8dzwiem9Dv
KypFnzcKPzR+ZVndAWniTHicp2eXQx/5dmRZL+7irFG9JegMlNcjhzKDysSIZxil
JPFlf01875i+IzrHrPbzdUX4zsSJIlMXTbcgJVqJeynY6qG45bRX4ITsUfhQDVIJ
SAh3ICt+p7w30JTYMLFAwzONT2FaGWKraB4v/jUCggEBANVIXZnvHJA3SsCfMupT
Cg8Rz8gE4TF4rqgtpVjeZ4vcGI72zliIaZSj2x54CI0FlTULegu5f7/oQa/IfyOZ
xCnR1Oah1q9bybjKJVqB8xASfjxa2Gp2HTyskhHMOBIYvqA3mQFb6/0YuBwqfRrr
3TcRR/Bc9w3SFZfB000uW/Lqbio0M7cmWxyV2EUnkKvtSurRllUMuJOhbTypBMSc
DghyyKx09jJi6Qv2XbQj8YKINBi+cxJJo4PAWVCCvhaGktnxw6lyfBOaQeSXGRwE
KDs9sS5gFKYDhs1/43lOgbMC43VhQlVFGtJw7wfuYgUMZFjfi6f/+zW1TCiltBwA
YGUCggEAToVBBOks7RQtSwdl6E4di5la05LFZMJrWxsPSOyVPENRrWeeq6idYSCj
v+fYz2wHHm4nc7FF4XIhXQN/Bv6nBwz60OuxMnsEc/4NFHYJWwKUkHuX7W/qbLsP
LKlvw2ywy/EIYqdcdyVryQ8s+sNZOYq5FDZ/0x5MgteVtIxd+KmMrtZDvqgZhNiq
VpFeIrHuav4cG45o3qY1e1IwFXZAvJzYxaZFpOIubwmSs4oD34nhNyJG5eswJOht
xz1tG8kDJFy3G/XrT9VN3inEaPT4y9qN8K03/cst32TOvD7nmInuBj6HsLnTLfX3
20VoGTwfsCfcapPA3/SqCZw7QpavIg==
-----END PRIVATE KEY-----

config/ssl/servercert.pem Normal file
@@ -0,0 +1,130 @@
Certificate:
Data:
Version: 3 (0x2)
Serial Number: 15 (0xf)
Signature Algorithm: sha256WithRSAEncryption
Issuer: C=GB, ST=England, L=London, O=Ubuntu Cloud, OU=Cloud
Validity
Not Before: Feb 11 12:49:11 2021 GMT
Not After : Feb 11 12:49:11 2022 GMT
Subject: C=GB, ST=England, L=London, O=Ubuntu Cloud, OU=Cloud, CN=10.0.1.211
Subject Public Key Info:
Public Key Algorithm: rsaEncryption
RSA Public-Key: (2048 bit)
Modulus:
00:c4:e4:ec:1a:64:c1:ec:15:05:72:47:82:ee:c6:
b1:12:a2:be:af:f1:52:d9:e5:97:52:35:11:b7:76:
da:53:54:99:60:87:28:d4:c4:55:70:c4:5a:c3:12:
2c:62:9a:de:c9:92:30:a4:45:56:83:08:62:6e:18:
df:eb:ea:eb:c3:44:17:fe:6a:dc:c4:f6:98:be:10:
7a:b4:f5:4d:9a:60:2f:6c:d1:40:e9:a6:57:79:f6:
c1:5f:7e:99:c4:86:12:a2:f0:7a:50:08:20:1e:a3:
42:77:71:fa:2f:47:8b:26:30:91:cb:fa:55:38:fa:
76:fe:15:4f:ec:d9:db:df:44:3c:e1:22:84:9f:6e:
cb:75:d2:0e:1d:93:c0:5a:83:ba:e8:13:e7:8f:13:
d9:e5:bc:52:04:d9:ea:68:62:e5:9d:00:11:42:ef:
70:54:f9:69:02:e3:f7:46:6b:35:ce:2c:9a:80:93:
7a:37:a5:4e:92:f1:b9:4a:47:13:f0:79:21:82:75:
86:7b:91:ca:63:d7:60:23:66:26:35:c9:81:5b:d1:
19:74:a0:e9:e9:72:c5:fb:18:55:f5:a1:ff:89:69:
04:b1:36:81:f5:5b:a1:1d:b7:6c:c2:74:c9:04:50:
a4:c1:dd:be:14:6a:eb:cc:f6:17:5a:fb:95:39:53:
46:71
Exponent: 65537 (0x10001)
X509v3 extensions:
X509v3 Subject Key Identifier:
CB:9B:67:C1:6B:30:93:13:FC:8A:BB:B0:CA:ED:CE:F4:F6:A2:95:65
X509v3 Authority Key Identifier:
keyid:31:95:72:EB:06:69:0D:21:96:63:DE:35:AE:70:52:EC:83:BC:D7:54
X509v3 Basic Constraints:
CA:FALSE
X509v3 Key Usage:
Digital Signature, Key Encipherment
X509v3 Subject Alternative Name:
IP Address:10.0.1.211, IP Address:10.0.1.212, IP Address:10.0.1.213, IP Address:10.0.1.214, IP Address:10.0.1.215, IP Address:10.0.1.216, IP Address:10.0.1.217, IP Address:10.0.1.218, IP Address:10.0.1.219, IP Address:10.0.1.220, IP Address:10.0.1.221, IP Address:10.0.1.222, IP Address:10.0.1.223, IP Address:10.0.1.224, IP Address:10.0.1.225, IP Address:10.0.1.226, IP Address:10.0.1.227, IP Address:10.0.1.228, IP Address:10.0.1.229, IP Address:10.0.1.230, DNS:glance-internal.example.com, DNS:glance-public.example.com, DNS:glance.example.com, DNS:cinder-internal.example.com, DNS:cinder-public.example.com, DNS:cinder.example.com, DNS:nova-internal.example.com, DNS:nova-public.example.com, DNS:nova.example.com, DNS:keystone-internal.example.com, DNS:keystone-public.example.com, DNS:keystone.example.com, DNS:neutron-internal.example.com, DNS:neutron-public.example.com, DNS:neutron.example.com, DNS:horizon-internal.example.com, DNS:horizon-public.example.com, DNS:horizon.example.com, DNS:swift-internal.example.com, DNS:swift-public.example.com, DNS:swift.example.com, DNS:heat-internal.example.com, DNS:heat-public.example.com, DNS:heat.example.com, DNS:aodh-internal.example.com, DNS:aodh-public.example.com, DNS:aodh.example.com, DNS:ceilometer-internal.example.com, DNS:ceilometer-public.example.com, DNS:ceilometer.example.com, DNS:gnocchi-internal.example.com, DNS:gnocchi-public.example.com, DNS:gnocchi.example.com, DNS:nagios-internal.example.com, DNS:nagios-public.example.com, DNS:nagios.example.com, DNS:vault-int.example.com, DNS:vault-public.example.com, DNS:vault.example.com, DNS:landscape-internal.example.com, DNS:landscape-public.example.com, DNS:landscape.example.com
Netscape Comment:
OpenSSL Generated Certificate
Signature Algorithm: sha256WithRSAEncryption
24:4f:fa:23:38:70:47:ca:67:a7:b3:df:60:d4:d4:e9:f1:2f:
83:ca:94:41:cd:60:c2:31:ca:da:0c:1b:32:40:8d:ac:bd:05:
f6:29:39:fd:a3:77:12:76:8d:50:8d:bd:e0:f6:83:d4:1f:fa:
96:f1:75:56:33:56:7b:9f:a6:c1:c5:5a:0e:28:fe:49:b0:ba:
5a:56:4b:af:be:c1:6a:8d:78:35:90:d3:c5:69:91:19:61:0c:
0c:5f:dd:cc:77:0b:6f:51:10:fe:06:cc:0e:f4:c2:65:c6:0b:
61:2d:95:88:df:a7:9a:d2:9b:dd:96:04:f7:77:41:e2:2a:da:
9b:a1:33:aa:de:ea:56:bb:78:d9:e5:dd:71:88:57:b2:d1:e8:
8b:75:da:f8:dc:9c:8a:0b:a5:55:28:a3:4a:d1:a3:c4:06:4e:
b4:8e:e1:44:11:4b:04:5a:07:37:26:0c:2b:a5:03:bb:f6:15:
8f:f3:e4:0f:a0:2a:b8:f7:c4:4d:e7:03:df:7c:58:0e:ca:67:
2a:34:4b:5b:33:b2:b6:26:88:20:34:87:cd:fb:e5:27:7f:64:
88:d5:f3:e8:6c:72:20:05:fb:bc:a1:0d:b7:d3:03:20:85:fb:
88:ef:ce:a0:cd:8e:35:d9:14:3b:48:be:5c:46:8f:13:bd:53:
04:93:51:d0:a7:a7:44:c2:81:9c:ff:70:ea:9f:07:73:31:e5:
4a:e1:ad:2c:53:66:44:34:1e:e0:50:72:ad:28:67:00:2f:86:
c8:11:23:a8:a1:20:d0:b0:51:44:2b:eb:46:61:7f:fd:43:29:
da:d0:f1:8c:d1:b2:d8:6c:34:79:f8:b9:77:89:58:30:b6:00:
de:05:5e:94:f0:c4:d8:05:c0:f3:a9:d4:cf:8f:f3:4a:8a:dd:
8c:bc:11:86:ae:d3:ec:e3:9a:ea:13:6a:db:2f:d2:53:84:3b:
b9:c5:98:23:d9:b6:4d:f5:c3:32:1f:6a:39:80:c4:66:b9:43:
9e:9a:39:7d:08:12:ec:87:cb:38:d1:4c:93:0c:ce:d0:b2:0e:
db:a6:00:a2:99:c0:11:06:81:a9:1c:bf:d8:8c:7b:c2:71:3d:
19:1a:61:c9:dd:ec:f9:44:ff:15:3a:1a:1f:d5:95:55:63:ee:
b2:35:01:81:83:89:b2:1a:a7:8d:5b:11:be:01:fc:3b:54:76:
ce:3b:6f:ae:6c:fc:b1:24:77:9e:4f:1a:82:02:20:0e:c5:24:
c4:5c:3f:23:1e:fe:b2:78:9b:0e:b1:91:4e:60:0f:26:ea:90:
5d:09:bc:b4:ab:a9:e4:fa:2b:c3:d3:6c:d5:30:7a:e4:f2:eb:
c6:e7:fb:f4:dc:5c:e3:38
-----BEGIN CERTIFICATE-----
MIIJgjCCB2qgAwIBAgIBDzANBgkqhkiG9w0BAQsFADBXMQswCQYDVQQGEwJHQjEQ
MA4GA1UECAwHRW5nbGFuZDEPMA0GA1UEBwwGTG9uZG9uMRUwEwYDVQQKDAxVYnVu
dHUgQ2xvdWQxDjAMBgNVBAsMBUNsb3VkMB4XDTIxMDIxMTEyNDkxMVoXDTIyMDIx
MTEyNDkxMVowbDELMAkGA1UEBhMCR0IxEDAOBgNVBAgMB0VuZ2xhbmQxDzANBgNV
BAcMBkxvbmRvbjEVMBMGA1UECgwMVWJ1bnR1IENsb3VkMQ4wDAYDVQQLDAVDbG91
ZDETMBEGA1UEAwwKMTAuMC4xLjIxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
AQoCggEBAMTk7BpkwewVBXJHgu7GsRKivq/xUtnll1I1Ebd22lNUmWCHKNTEVXDE
WsMSLGKa3smSMKRFVoMIYm4Y3+vq68NEF/5q3MT2mL4QerT1TZpgL2zRQOmmV3n2
wV9+mcSGEqLwelAIIB6jQndx+i9HiyYwkcv6VTj6dv4VT+zZ299EPOEihJ9uy3XS
Dh2TwFqDuugT548T2eW8UgTZ6mhi5Z0AEULvcFT5aQLj90ZrNc4smoCTejelTpLx
uUpHE/B5IYJ1hnuRymPXYCNmJjXJgVvRGXSg6elyxfsYVfWh/4lpBLE2gfVboR23
bMJ0yQRQpMHdvhRq68z2F1r7lTlTRnECAwEAAaOCBUIwggU+MB0GA1UdDgQWBBTL
m2fBazCTE/yKu7DK7c709qKVZTAfBgNVHSMEGDAWgBQxlXLrBmkNIZZj3jWucFLs
g7zXVDAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDCCBLQGA1UdEQSCBKswggSnhwQK
AAHThwQKAAHUhwQKAAHVhwQKAAHWhwQKAAHXhwQKAAHYhwQKAAHZhwQKAAHahwQK
AAHbhwQKAAHchwQKAAHdhwQKAAHehwQKAAHfhwQKAAHghwQKAAHhhwQKAAHihwQK
AAHjhwQKAAHkhwQKAAHlhwQKAAHmghtnbGFuY2UtaW50ZXJuYWwuZXhhbXBsZS5j
b22CGWdsYW5jZS1wdWJsaWMuZXhhbXBsZS5jb22CEmdsYW5jZS5leGFtcGxlLmNv
bYIbY2luZGVyLWludGVybmFsLmV4YW1wbGUuY29tghljaW5kZXItcHVibGljLmV4
YW1wbGUuY29tghJjaW5kZXIuZXhhbXBsZS5jb22CGW5vdmEtaW50ZXJuYWwuZXhh
bXBsZS5jb22CF25vdmEtcHVibGljLmV4YW1wbGUuY29tghBub3ZhLmV4YW1wbGUu
Y29tgh1rZXlzdG9uZS1pbnRlcm5hbC5leGFtcGxlLmNvbYIba2V5c3RvbmUtcHVi
bGljLmV4YW1wbGUuY29tghRrZXlzdG9uZS5leGFtcGxlLmNvbYIcbmV1dHJvbi1p
bnRlcm5hbC5leGFtcGxlLmNvbYIabmV1dHJvbi1wdWJsaWMuZXhhbXBsZS5jb22C
E25ldXRyb24uZXhhbXBsZS5jb22CHGhvcml6b24taW50ZXJuYWwuZXhhbXBsZS5j
b22CGmhvcml6b24tcHVibGljLmV4YW1wbGUuY29tghNob3Jpem9uLmV4YW1wbGUu
Y29tghpzd2lmdC1pbnRlcm5hbC5leGFtcGxlLmNvbYIYc3dpZnQtcHVibGljLmV4
YW1wbGUuY29tghFzd2lmdC5leGFtcGxlLmNvbYIZaGVhdC1pbnRlcm5hbC5leGFt
cGxlLmNvbYIXaGVhdC1wdWJsaWMuZXhhbXBsZS5jb22CEGhlYXQuZXhhbXBsZS5j
b22CGWFvZGgtaW50ZXJuYWwuZXhhbXBsZS5jb22CF2FvZGgtcHVibGljLmV4YW1w
bGUuY29tghBhb2RoLmV4YW1wbGUuY29tgh9jZWlsb21ldGVyLWludGVybmFsLmV4
YW1wbGUuY29tgh1jZWlsb21ldGVyLXB1YmxpYy5leGFtcGxlLmNvbYIWY2VpbG9t
ZXRlci5leGFtcGxlLmNvbYIcZ25vY2NoaS1pbnRlcm5hbC5leGFtcGxlLmNvbYIa
Z25vY2NoaS1wdWJsaWMuZXhhbXBsZS5jb22CE2dub2NjaGkuZXhhbXBsZS5jb22C
G25hZ2lvcy1pbnRlcm5hbC5leGFtcGxlLmNvbYIZbmFnaW9zLXB1YmxpYy5leGFt
cGxlLmNvbYISbmFnaW9zLmV4YW1wbGUuY29tghV2YXVsdC1pbnQuZXhhbXBsZS5j
b22CGHZhdWx0LXB1YmxpYy5leGFtcGxlLmNvbYIRdmF1bHQuZXhhbXBsZS5jb22C
HmxhbmRzY2FwZS1pbnRlcm5hbC5leGFtcGxlLmNvbYIcbGFuZHNjYXBlLXB1Ymxp
Yy5leGFtcGxlLmNvbYIVbGFuZHNjYXBlLmV4YW1wbGUuY29tMCwGCWCGSAGG+EIB
DQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTANBgkqhkiG9w0BAQsF
AAOCAgEAJE/6IzhwR8pnp7PfYNTU6fEvg8qUQc1gwjHK2gwbMkCNrL0F9ik5/aN3
EnaNUI294PaD1B/6lvF1VjNWe5+mwcVaDij+SbC6WlZLr77Bao14NZDTxWmRGWEM
DF/dzHcLb1EQ/gbMDvTCZcYLYS2ViN+nmtKb3ZYE93dB4iram6Ezqt7qVrt42eXd
cYhXstHoi3Xa+NycigulVSijStGjxAZOtI7hRBFLBFoHNyYMK6UDu/YVj/PkD6Aq
uPfETecD33xYDspnKjRLWzOytiaIIDSHzfvlJ39kiNXz6GxyIAX7vKENt9MDIIX7
iO/OoM2ONdkUO0i+XEaPE71TBJNR0KenRMKBnP9w6p8HczHlSuGtLFNmRDQe4FBy
rShnAC+GyBEjqKEg0LBRRCvrRmF//UMp2tDxjNGy2Gw0efi5d4lYMLYA3gVelPDE
2AXA86nUz4/zSordjLwRhq7T7OOa6hNq2y/SU4Q7ucWYI9m2TfXDMh9qOYDEZrlD
npo5fQgS7IfLONFMkwzO0LIO26YAopnAEQaBqRy/2Ix7wnE9GRphyd3s+UT/FToa
H9WVVWPusjUBgYOJshqnjVsRvgH8O1R2zjtvrmz8sSR3nk8aggIgDsUkxFw/Ix7+
snibDrGRTmAPJuqQXQm8tKup5Porw9Ns1TB65PLrxuf79Nxc4zg=
-----END CERTIFICATE-----

28
config/ssl/serverkey.pem Normal file
View File

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDE5OwaZMHsFQVy
R4LuxrESor6v8VLZ5ZdSNRG3dtpTVJlghyjUxFVwxFrDEiximt7JkjCkRVaDCGJu
GN/r6uvDRBf+atzE9pi+EHq09U2aYC9s0UDppld59sFffpnEhhKi8HpQCCAeo0J3
cfovR4smMJHL+lU4+nb+FU/s2dvfRDzhIoSfbst10g4dk8Bag7roE+ePE9nlvFIE
2epoYuWdABFC73BU+WkC4/dGazXOLJqAk3o3pU6S8blKRxPweSGCdYZ7kcpj12Aj
ZiY1yYFb0Rl0oOnpcsX7GFX1of+JaQSxNoH1W6Edt2zCdMkEUKTB3b4UauvM9hda
+5U5U0ZxAgMBAAECggEAZJWuKC2hA8IZbjAqK2FFxIdvcuKNuNMZYt3JlzgYgMmP
MUjbCxbhryIgW9dE5XrIpaERSFpDv9zgq+35NsRkfPSkbUyaS+TOUwqIJTnL0zmg
AK9FwhdASQZN80LxuvvjOWowkgIUppIwumR3sv+RjojxcosITntlnpe0pQMG2B9W
stEcU+N7jnWtZ9srY5y8ofxGNZ/ro25upFLHI/eE8tlTvWpSOdXS7vFvpa9VhVzo
ZwjcqpIAuW+JwRqeqshDGlgECsaPaAtX/+txIrHDFZCRXAB/GT2y9rpPEH/zjVT8
TMmaxIanJcxWegdrvq1ZX+eS7luR9FwQfib08VkaIQKBgQDjOKeW6ewUu8CdKIxb
ZC4VYx9sfQsGDsoBDFMU/nGQSYnZT5kQoAL/98ixXiCunmuAkNfl1Uhk1i8zVsvL
lrA57D2KHnrbvCyY97VFFKGwZG6Z5XYRPkgzluR4+eycTkgCXNIgSeYOsGiEvHdl
MZASN7A+coVXO/bt7fUiiJvWDwKBgQDd1PPuV/tY0JHPLdoqEGRysjbpKdggN2Wr
LT6/do3czSTPmkmXhiqdH98H02DkX//hM3Bu+4uAAgDU+pro4gU8OjGi6Rb6Cxwg
k9asyBGRK5jD2iYNI9HXirMtN8ktDetpNJlriFYaIY1UP0ME9jkUHuB2ePwmZfKV
rmvI0QebfwKBgE0Tj83iheG2mqz07z+lKPi6ShOMCyw/4gge/SPW+ADg4TDlDmAU
V1Aq5lo8OsvhE0hmWcYt5kPGX/aDT9g8woSzfWCX3EcjeuFczZGkYQCr4NS1gKpy
vR065z+eT8PpzgV3JRQs3SxIbPvxznJ2MI/tcgyM3mxr++RT3t6bBnnDAoGAbwFw
a3W1fjUosc8VG/WF/ms11SmuUDjIdIc4niDaToKNiCGB8AQgcO4Q8l6RXta3Od8+
xRq1LQJTnkAloqHv0rqgOhCAAfHrSlYQl/ep6sYxNNiGMA8bo8txbBA9aIFNqyC6
IThhNcRSb7UUeXgpCeuFkWNrcl+Tq9lTcHNBZksCgYByy+cEi3OBrmPT0K24f7gv
0w249LbB0p4syQ2sC8uDhiz6NdFcB+7YvlZdCVAiSaO4X8AvnL6d1rGcAByS/wOF
Q79YMHq82iYtJnvGDrlgFoKHSYhUcmdN5idBLVWKm281NVIQKJMTlCMjvDdTOCKh
wnpnmlCJWeyRkanwX11K2A==
-----END PRIVATE KEY-----

0
generated/.gitkeep Normal file
View File

1
generated/maas/maas-api Normal file
View File

@ -0,0 +1 @@
QjzaY5ucvSSrydMKnz:aChrPQrVG4tfeAabrm:5ySUKY7c7EMhKhuKhpsXGzeaQLYpMAXx

1
generated/maas/maas-pass Normal file
View File

@ -0,0 +1 @@
openstack

1070
resources/keystone.yaml Normal file

File diff suppressed because it is too large

BIN
resources/keystone.zip Normal file

Binary file not shown.

View File

@ -0,0 +1,27 @@
#!/bin/bash
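# Post-deployment fix-ups: point each keystone-ldap-domainN charm at its LDAP
# unit, and add /etc/hosts entries so ceilometer, openstack-service-checks and
# ceph-osd can reach keystone, landscape and vault by name.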
juju_status_json=$(juju status --format json)
# ldap
for i in `seq 1 3`; do
ldap_ip=$(echo $juju_status_json | jq .applications[\"ldap-domain${i}\"].units[][\"public-address\"] | sed s/\"//g)
juju config keystone-ldap-domain${i} ldap-server=ldap://${ldap_ip}
done
# landscape
landscape_ip=$(echo $juju_status_json | jq .applications[\"landscape-haproxy\"].units[][\"public-address\"] | sed s/\"//g)
juju run --all "echo ${landscape_ip} landscape.example.com | sudo tee -a /etc/hosts"
# fix ceilometer, so that it can get to keystone
juju run --application ceilometer "echo 10.0.1.216 keystone.example.com | sudo tee -a /etc/hosts"
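# The ceilometer-upgrade action sets up ceilometer's resource types in gnocchi
# and needs keystone to be reachable, hence the /etc/hosts entry above.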
juju run-action ceilometer/0 ceilometer-upgrade
# ensure openstack-service-checks can get to keystone
juju run --application openstack-service-checks "echo 10.0.1.216 keystone.example.com | sudo tee -a /etc/hosts"
juju run --application openstack-service-checks "echo 10.0.1.216 keystone-internal.example.com | sudo tee -a /etc/hosts"
# ensure ceph-osd can get to vault
juju run --application ceph-osd "echo 10.0.1.222 vault.example.com | sudo tee -a /etc/hosts"
juju run --application ceph-osd "echo 10.0.1.222 vault-internal.example.com | sudo tee -a /etc/hosts"

View File

@ -0,0 +1,13 @@
#!/bin/bash
keystone_passwd=$(juju run --unit keystone/leader 'leader-get admin_passwd')
nagios_passwd=$(juju run --unit nagios/leader 'sudo cat /var/lib/juju/nagios.passwd')
grafana_passwd=$(juju run-action --wait grafana/leader get-admin-password | grep password | awk '{print $2}')
graylog_passwd=$(juju run-action --wait graylog/leader show-admin-password | grep admin-password | awk '{print $2}')
mysql_passwd=$(juju run --unit mysql/leader 'leader-get root-password')
echo "Keystone admin password: ... ${keystone_passwd}"
echo "nagios password: ... ${nagios_passwd}"
echo "grafana password: ... ${grafana_passwd}"
echo "graylog password: ... ${graylog_passwd}"
echo "mysql password: ... ${mysql_passwd}"

View File

@ -0,0 +1,12 @@
#!/bin/bash
# This is for when the landscape-haproxy certificate is self-signed. It ensures that landscape registration will still work
landscape_crt=$(juju run --application landscape-haproxy 'sudo openssl x509 -in /var/lib/haproxy/default.pem' | base64)
juju config landscape-client ssl-public-key="base64:${landscape_crt}"
# And yes, this needs to use the IP address, otherwise the registration will fail
landscape_ip=$(juju run --application landscape-haproxy 'unit-get private-address')
juju config landscape-client url="https://${landscape_ip}/message-system" ping-url="http://${landscape_ip}/ping"
# May need to restart all the landscape-clients
#juju run --application landscape-client 'sudo systemctl restart landscape-client.service'

View File

@ -0,0 +1,9 @@
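# OpenStack RC file for the LDAP-backed user johndoe in domain1; source it,
# then verify with e.g.: openstack token issue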
export OS_AUTH_URL=http://keystone.example.com:5000/v3
export OS_REGION_NAME=RegionOne
export OS_PROJECT_NAME=ldap1_proj1
export OS_PROJECT_DOMAIN_NAME=domain1
export OS_USER_DOMAIN_NAME=domain1
export OS_IDENTITY_API_VERSION=3
export OS_PASSWORD=crapper
export OS_USERNAME=johndoe

View File

@ -0,0 +1,14 @@
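# OpenStack RC file for a second LDAP-backed user: prefers the keystone VIP,
# falling back to the keystone/0 unit address from a cached `juju status` dump.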
keystone_addr=$(juju config keystone vip)
if [ -z "$keystone_addr" ]; then
    # Fall back to the keystone/0 unit address when no VIP is configured;
    # $juju_status_json_cache is assumed to point at a cached `juju status --format json` dump.
    keystone_addr=$(jq -r '.applications.keystone.units."keystone/0"."public-address"' "${juju_status_json_cache}")
fi
export OS_AUTH_URL=http://${keystone_addr}:5000/v3
export OS_REGION_NAME=RegionOne
export OS_PROJECT_NAME=ldap_proj2
export OS_PROJECT_DOMAIN_NAME=userdomain
export OS_USER_DOMAIN_NAME=userdomain
export OS_IDENTITY_API_VERSION=3
export OS_PASSWORD=crapper
export OS_USERNAME=user20

View File

@ -0,0 +1,9 @@
#!/bin/bash
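# Reset the SSL configuration of each API charm back to its defaults.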
for proj in cinder glance heat keystone openstack-dashboard neutron-api nova-cloud-controller nagios landscape-haproxy; do
juju config $proj --reset ssl_ca
juju config $proj --reset ssl_cert
juju config $proj --reset ssl_key
done

View File

@ -0,0 +1,16 @@
#!/bin/bash
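# Install the repo's server certificate and key on landscape-haproxy, then
# hand the matching public key to landscape-client.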
TMPDIR=$(mktemp -d)
DATE=$(date +%s)
lds_cert_pub=$TMPDIR/landscape_cert_${DATE}_pub.pem
juju config landscape-haproxy ssl_cert=$(base64 -w 0 ../config/ssl/servercert.pem) ssl_key=$(base64 -w 0 ../config/ssl/serverkey.pem)
openssl x509 -pubkey -noout -in ../config/ssl/servercert.pem > ${lds_cert_pub}
pub_key_base64=$(base64 -w 0 ${lds_cert_pub})
# Remove the whole temporary directory, not just the extracted public key
rm -rf ${TMPDIR}
juju config landscape-client ssl-public-key="base64:$pub_key_base64"

View File

@ -0,0 +1,13 @@
#!/bin/bash
# This is for when the landscape-haproxy certificate is self-signed. It ensures that landscape registration will still work
landscape_crt=$(juju run --application landscape-haproxy 'sudo openssl x509 -in /var/lib/haproxy/default.pem' | base64)
juju config landscape-client ssl-public-key="base64:${landscape_crt}"
# And yes, this needs to use the IP address, otherwise the registration will fail
landscape_ip=$(juju run --application landscape-haproxy 'unit-get private-address')
juju config landscape-client url="https://${landscape_ip}/message-system" ping-url="http://${landscape_ip}/ping"
# May need to restart all the landscape-clients
#juju run --application landscape-client 'sudo systemctl restart landscape-client.service'

View File

@ -0,0 +1,8 @@
#!/bin/bash
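# Point each keystone-ldap-domainN application at the public address of its
# matching ldap-domainN unit.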
juju_status_json=$(juju status --format json)
for i in `seq 1 3`; do
ldap_ip=$(echo $juju_status_json | jq .applications[\"ldap-domain${i}\"].units[][\"public-address\"] | sed s/\"//g)
juju config keystone-ldap-domain${i} ldap-server=ldap://${ldap_ip}
done

13
scripts/other-scripts/force_ip Executable file
View File

@ -0,0 +1,13 @@
#!/bin/bash
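# Usage: force_ip <machine-name> <static-ip> <subnet-name>
# e.g. (hypothetical values): ./force_ip as1-maas-node-04 10.0.1.50 oam
# Unlinks the machine's existing link on the named subnet and re-links it
# with the given static IP.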
export MACHINE_NAME=$1
export STATIC_IP=$2
export SUBNET_NAME=$3
export SUBNET_ID=$(maas root subnets read | jq '.[] | {id:.id,name:.name}' --compact-output | grep $SUBNET_NAME | jq .id)
export SYSTEM_ID=$(maas root nodes read | jq '.[] | {hostname:.hostname,system_id: .system_id, status:.status}' --compact-output | grep $MACHINE_NAME | jq .system_id | awk -F"\"" '{print $2}')
export EXISTING_LINK_ID=$(maas root interfaces read ${SYSTEM_ID} | jq '.[] | .links[] | {link_id:.id, mode:.mode, subnet:.subnet.name}' --compact-output | grep $SUBNET_NAME | jq .link_id)
export NIC_ID=$(maas root interfaces read ${SYSTEM_ID} | jq '.[] | {iface_id:.id, name:.name, mac:.mac_address, subnet:.subnet.name, link: .links}' --compact-output | grep ${EXISTING_LINK_ID} | jq .iface_id)
maas root interface unlink-subnet ${SYSTEM_ID} ${NIC_ID} id=${EXISTING_LINK_ID}
maas root interface link-subnet ${SYSTEM_ID} ${NIC_ID} mode=STATIC subnet=${SUBNET_ID} ip_address=${STATIC_IP}

View File

@ -0,0 +1,24 @@
#!/usr/bin/python3
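# Remove the duplicate "auto"-mode subnet link from any MAAS NIC that ended up
# with two links, keeping the other link in place.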
import json
import subprocess

machines = json.loads(subprocess.check_output("maas root nodes read", shell=True))
for machine in machines:
    system_id = machine["system_id"]
    hostname = machine["hostname"]
    print("Checking interfaces for machine {} with ID {}".format(hostname, system_id))
    interfaces = json.loads(subprocess.check_output("maas root interfaces read {}".format(system_id), shell=True))
    for interface in interfaces:
        link_id = None
        links = interface["links"]
        if len(links) == 2:
            for link in links:
                if link["mode"] == "auto":
                    link_id = link["id"]
            nic_id = interface["id"]
            print("Removing link_id {} for NIC {} for machine {} with name {}".format(link_id, nic_id, system_id, hostname))
            subprocess.check_call("maas root interface unlink-subnet {} {} id={}".format(system_id, nic_id, link_id), shell=True)
            break

20
scripts/other-scripts/vault.sh Executable file
View File

@ -0,0 +1,20 @@
#!/bin/bash
vault_vip=$(juju config vault vip)
echo export VAULT_ADDR="http://${vault_vip}:8200"
export VAULT_ADDR="http://${vault_vip}:8200"
echo " "
IPS=$(juju status vault --format json | jq '.applications.vault.units | to_entries[] | .value."public-address"' | sed s/\"//g)
for ip in $IPS;do
echo export VAULT_ADDR=http://${ip}:8200;
export VAULT_ADDR=http://${ip}:8200;
for vault_key in $(head -n3 ../../secrets/vault.txt | awk '{print $4}');do
echo vault operator unseal -tls-skip-verify $vault_key
vault operator unseal -tls-skip-verify $vault_key
done;
done;

View File

@ -0,0 +1,45 @@
#!/bin/bash
# The following flavors will be configured initially in the cloud. Each
# flavor is created once per host aggregate, so the total number of flavors
# is the four sizes below multiplied by the number of entries in
# HOST_AGGREGATES; with the single "default" aggregate configured here,
# that means four flavors.
# Name vCPU RAM (MB) Disk (GB) Disk Type
# m1.small 1 2048 20 Ceph
# m1.medium 2 4096 40 Ceph
# m1.large 4 8192 80 Ceph
# m1.xlarge 8 16384 160 Ceph
HOST_AGGREGATES=(
"default"
)
for host_aggregate in ${HOST_AGGREGATES[*]}; do
# ${host_aggregate}.m1.small 1 2048 20 Ceph
openstack flavor create \
--vcpus 1 --ram 2048 --disk 20 \
--property aggregate_instance_extra_specs:host_aggregate=${host_aggregate} \
${host_aggregate}.m1.small
# ${host_aggregate}.m1.medium 2 4096 40 Ceph
openstack flavor create \
--vcpus 2 --ram 4096 --disk 40 \
--property aggregate_instance_extra_specs:host_aggregate=${host_aggregate} \
${host_aggregate}.m1.medium
# ${host_aggregate}.m1.large 4 8192 80 Ceph
openstack flavor create \
--vcpus 4 --ram 8192 --disk 80 \
--property aggregate_instance_extra_specs:host_aggregate=${host_aggregate} \
${host_aggregate}.m1.large
# ${host_aggregate}.m1.xlarge 8 16384 160 Ceph
openstack flavor create \
--vcpus 8 --ram 16384 --disk 160 \
--property aggregate_instance_extra_specs:host_aggregate=${host_aggregate} \
${host_aggregate}.m1.xlarge
done
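# Verify the created flavors afterwards with: openstack flavor list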

View File

@ -0,0 +1,27 @@
#!/bin/bash
# Name Description Compute Nodes
# default 2:1 CPU 04,05,06
declare -A COMPUTE_NODES
COMPUTE_NODES=(
["default"]="04 05 06"
)
NODE_NAME_PREFIX="node0"
for aggregate in "${!COMPUTE_NODES[@]}"; do
# Create aggregate
echo "Creating host aggregate ${aggregate}..."
openstack aggregate create \
--property host_aggregate=${aggregate} \
${aggregate}
# Add COMPUTE_NODES to the host aggregate
for node in ${COMPUTE_NODES[$aggregate]}; do
echo "Adding node ${NODE_NAME_PREFIX}${node} to host aggregate ${aggregate}..."
openstack aggregate add host ${aggregate} ${NODE_NAME_PREFIX}${node}
done
done

View File

@ -0,0 +1,51 @@
#!/bin/bash
# Name Description Compute Nodes
# asrock01 Nodes in AZ1 02 03
# asrock02 Nodes in AZ2 02 03
# asrock03 Nodes in AZ3 02 03
declare -A COMPUTE_NODES
COMPUTE_NODES=(
# Host Aggregate name: Compute Nodes
["asrock01"]="02 03"
["asrock02"]="02 03"
["asrock03"]="02 03"
)
declare -A AVAILABILITY_ZONES
AVAILABILITY_ZONES=(
# Host Aggregate name: Availability Zone
["asrock01"]="asrock01"
["asrock02"]="asrock02"
["asrock03"]="asrock03"
)
declare -A NODE_NAME_PREFIXES
NODE_NAME_PREFIXES=(
# Host Aggregate name: Compute Nodes
["asrock01"]="as1-maas-node-"
["asrock02"]="as2-maas-node-"
["asrock03"]="as3-maas-node-"
)
for aggregate in "${!COMPUTE_NODES[@]}"; do
# Create aggregate
echo "Creating host aggregate ${aggregate}..."
openstack aggregate create \
--zone ${AVAILABILITY_ZONES[$aggregate]} \
${aggregate}
# Add COMPUTE_NODES to the host aggregate
for node in ${COMPUTE_NODES[$aggregate]}; do
echo "Adding node ${NODE_NAME_PREFIXES[$aggregate]}${node} to host aggregate ${aggregate}..."
openstack aggregate add host ${aggregate} ${NODE_NAME_PREFIXES[$aggregate]}${node}
done
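# Create the tiny cirros test flavor for this AZ only if it does not already
# exist, keeping repeated runs idempotent.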
openstack flavor show ${AVAILABILITY_ZONES[$aggregate]}.m1.cirros || openstack flavor create \
--vcpus 1 --ram 64 --disk 1 \
--property aggregate_instance_extra_specs:host_aggregate=${AVAILABILITY_ZONES[$aggregate]} \
${AVAILABILITY_ZONES[$aggregate]}.m1.cirros
done

View File

@ -0,0 +1,18 @@
[Unit]
Description=Configure SRIOV Virtual Functions
DefaultDependencies=no
Wants=network.target
After=local-fs.target network-pre.target apparmor.service systemd-sysctl.service systemd-modules-load.service
Before=network.target shutdown.target network-online.target
Conflicts=shutdown.target

[Install]
WantedBy=multi-user.target
WantedBy=network-online.target

[Service]
Type=oneshot
ExecStart=/usr/local/bin/networking-sriov.sh systemd-start
ExecStop=/usr/local/bin/networking-sriov.sh systemd-stop
RemainAfterExit=true
TimeoutStartSec=5min
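# To activate (assuming the unit is installed as
# /etc/systemd/system/networking-sriov.service):
#   systemctl daemon-reload && systemctl enable --now networking-sriov.service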

View File

@ -0,0 +1,34 @@
#!/bin/bash
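# Writes the desired virtual-function count to sriov_numvfs for enp175s0f1
# (assumed to be the SR-IOV-capable NIC); invoked by the networking-sriov
# systemd unit above.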
DESC="Configure SRIOV Virtual Functions"

. /lib/lsb/init-functions

do_start() {
    echo '12' > /sys/class/net/enp175s0f1/device/sriov_numvfs
}

do_stop() {
    echo '0' > /sys/class/net/enp175s0f1/device/sriov_numvfs
}

case "$1" in
    systemd-start)
        do_start
        ;;
    systemd-stop)
        do_stop
        ;;
    restart)
        log_daemon_msg "Re-$DESC"
        do_stop
        do_start
        ;;
    *)
        N=/usr/local/bin/networking-sriov.sh
        echo "Usage: $N {restart|systemd-start|systemd-stop}" >&2
        ;;
esac

exit 0

0
secrets/.gitkeep Normal file
View File

18
secrets/vault.txt Normal file
View File

@ -0,0 +1,18 @@
Unseal Key 1: L3OvWpS8dYyIl9mxJ/rn46cn5uVlf9FVZOfngf6K03b+
Unseal Key 2: OYnjKwMDar1pAWB8XFuwq0x6TyTBRaT5BvcG6J1jNKDJ
Unseal Key 3: aKvnqpX+6kWIJe1GWR8M/joJpDissExSk1oYC1vO5lmy
Unseal Key 4: 76IAnSGfbnugZCDBgtoLMsAnhmErr6N9aJnuEAQrUP//
Unseal Key 5: +VhM7LYgcUpB8pkM+Xtceit6L6CPldbRCokPeWfCtynI
Initial Root Token: s.MC3kjNzrLhBuPk2DCrOzVrcw
Vault initialized with 5 key shares and a key threshold of 3. Please securely
distribute the key shares printed above. When the Vault is re-sealed,
restarted, or stopped, you must supply at least 3 of these keys to unseal it
before it can start servicing requests.
Vault does not store the generated master key. Without at least 3 key to
reconstruct the master key, Vault will remain permanently sealed!
It is possible to generate new unseal keys, provided you have a quorum of
existing unseal keys shares. See "vault operator rekey" for more information.