From d9f23802f746b95000dad675d0ed55ab4c855fbc Mon Sep 17 00:00:00 2001 From: Arif Ali Date: Fri, 29 Oct 2021 09:57:19 +0100 Subject: [PATCH] Initial commit --- .gitignore | 4 + README.md | 0 config/bucketsconfig.yaml | 469 +++++ config/bundle.yaml | 1503 +++++++++++++++++ config/canonical-openstack-rules.yaml | 113 ++ config/dnsresources.yaml | 54 + config/hosts.yaml | 39 + config/juju-model-default-cis.yaml | 72 + config/juju-model-default.yaml | 32 + config/juju_deploy.sh | 17 + config/master.yaml | 94 ++ config/networks.yaml | 131 ++ config/nodes.yaml | 24 + config/openstack.yaml | 21 + config/overlays/contrail.yaml | 264 +++ .../overlays/contrail_versioned_overlay.yaml | 21 + config/overlays/hostnames.yaml | 59 + config/overlays/ldap.yaml | 106 ++ .../overlays/openstack_versioned_overlay.yaml | 149 ++ .../openstack_versioned_overlay_focal.yaml | 221 +++ .../openstack_versioned_overlay_gemini.yaml | 141 ++ config/overlays/ovs.yaml | 22 + config/overlays/resources.yaml | 7 + config/overlays/ssl.yaml | 124 ++ config/overlays/stsstack.yaml | 9 + config/ssl/cacert.pem | 32 + config/ssl/copycerts.sh | 7 + config/ssl/privkey.pem | 52 + config/ssl/servercert.pem | 130 ++ config/ssl/serverkey.pem | 28 + generated/.gitkeep | 0 generated/maas/maas-api | 1 + generated/maas/maas-pass | 1 + resources/keystone.yaml | 1070 ++++++++++++ resources/keystone.zip | Bin 0 -> 4937 bytes scripts/arif-scripts/99-post-setup.sh | 27 + scripts/arif-scripts/get_passwords.sh | 13 + scripts/arif-scripts/landscape-certs.sh | 12 + scripts/arif-scripts/ldap/nova_ldap.rc | 9 + scripts/arif-scripts/ldap/nova_ldap_user20.rc | 14 + scripts/arif-scripts/reset_certs.sh | 9 + .../arif-scripts/update_landscape_certs.sh | 16 + .../update_landscape_certs_self.sh | 13 + scripts/arif-scripts/update_ldap.sh | 8 + scripts/other-scripts/force_ip | 13 + scripts/other-scripts/remove_alias | 24 + scripts/other-scripts/vault.sh | 20 + scripts/post-deployment/01-create-flavors.sh | 45 + .../post-deployment/02-create-host-types.sh | 27 + .../03-set-availability-zones.sh | 51 + scripts/sriov/networking-sriov.service | 18 + scripts/sriov/networking-sriov.sh | 34 + secrets/.gitkeep | 0 secrets/vault.txt | 18 + 54 files changed, 5388 insertions(+) create mode 100644 .gitignore create mode 100644 README.md create mode 100644 config/bucketsconfig.yaml create mode 100644 config/bundle.yaml create mode 100644 config/canonical-openstack-rules.yaml create mode 100644 config/dnsresources.yaml create mode 100644 config/hosts.yaml create mode 100644 config/juju-model-default-cis.yaml create mode 100644 config/juju-model-default.yaml create mode 100755 config/juju_deploy.sh create mode 100644 config/master.yaml create mode 100644 config/networks.yaml create mode 100644 config/nodes.yaml create mode 100644 config/openstack.yaml create mode 100644 config/overlays/contrail.yaml create mode 100644 config/overlays/contrail_versioned_overlay.yaml create mode 100644 config/overlays/hostnames.yaml create mode 100644 config/overlays/ldap.yaml create mode 100644 config/overlays/openstack_versioned_overlay.yaml create mode 100644 config/overlays/openstack_versioned_overlay_focal.yaml create mode 100644 config/overlays/openstack_versioned_overlay_gemini.yaml create mode 100644 config/overlays/ovs.yaml create mode 100644 config/overlays/resources.yaml create mode 100644 config/overlays/ssl.yaml create mode 100644 config/overlays/stsstack.yaml create mode 100644 config/ssl/cacert.pem create mode 100755 config/ssl/copycerts.sh create mode 100644 config/ssl/privkey.pem 
create mode 100644 config/ssl/servercert.pem create mode 100644 config/ssl/serverkey.pem create mode 100644 generated/.gitkeep create mode 100644 generated/maas/maas-api create mode 100644 generated/maas/maas-pass create mode 100644 resources/keystone.yaml create mode 100644 resources/keystone.zip create mode 100755 scripts/arif-scripts/99-post-setup.sh create mode 100755 scripts/arif-scripts/get_passwords.sh create mode 100755 scripts/arif-scripts/landscape-certs.sh create mode 100644 scripts/arif-scripts/ldap/nova_ldap.rc create mode 100644 scripts/arif-scripts/ldap/nova_ldap_user20.rc create mode 100755 scripts/arif-scripts/reset_certs.sh create mode 100755 scripts/arif-scripts/update_landscape_certs.sh create mode 100755 scripts/arif-scripts/update_landscape_certs_self.sh create mode 100755 scripts/arif-scripts/update_ldap.sh create mode 100755 scripts/other-scripts/force_ip create mode 100755 scripts/other-scripts/remove_alias create mode 100755 scripts/other-scripts/vault.sh create mode 100755 scripts/post-deployment/01-create-flavors.sh create mode 100755 scripts/post-deployment/02-create-host-types.sh create mode 100755 scripts/post-deployment/03-set-availability-zones.sh create mode 100644 scripts/sriov/networking-sriov.service create mode 100755 scripts/sriov/networking-sriov.sh create mode 100644 secrets/.gitkeep create mode 100644 secrets/vault.txt diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..64c5526 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +generated/* +secrets/* +.vscode +*~ diff --git a/README.md b/README.md new file mode 100644 index 0000000..e69de29 diff --git a/config/bucketsconfig.yaml b/config/bucketsconfig.yaml new file mode 100644 index 0000000..6af140d --- /dev/null +++ b/config/bucketsconfig.yaml @@ -0,0 +1,469 @@ +--- +allocations: + + control: + machines: + - as1-maas-node-01 + - as2-maas-node-01 + - as3-maas-node-01 + + lma: + machines: + - as1-maas-node-02 + - as2-maas-node-02 + - as3-maas-node-02 + + landscape: + machines: + - as1-maas-node-03 + - as2-maas-node-03 + - as3-maas-node-03 + + compute: + machines: + - as1-maas-node-04 + - as1-maas-node-05 + - as2-maas-node-04 + - as2-maas-node-05 + - as3-maas-node-04 + - as3-maas-node-05 + +configs: + + control: + disks: &common-disks + # RAID 1 OS disk + - disk: 0 + id: os0 + ptable: GPT + type: disk + + # EFI partition + - device: os0 + id: os0-efi + number: 1 + size: 512M + type: partition + + # EFI format + - volume: os0-efi + id: os0-efi-format + label: EFI + fstype: fat32 + type: format + + # EFI mount + - device: os0-efi-format + id: os0-efi-mount + path: /boot/efi + type: mount + + # Boot partition + - device: os0 + id: os0-boot + number: 2 + size: 2G + type: partition + + # Boot format + - volume: os0-boot + id: os0-boot-format + label: BOOT + fstype: ext4 + type: format + + # Boot mount + - device: os0-boot-format + id: os0-boot-mount + path: /boot + type: mount + + # bcache partition + - device: os0 + id: os0-bcache + number: 3 + size: 236G + type: partition + + # RAID 10 Disk + - disk: 1 + id: os1 + ptable: GPT + type: disk + + # root partition + - device: os1 + id: os1-root + number: 1 + size: 11.9T + type: partition + + # Bcache for root + - backing_device: os1-root + cache_device: os0-bcache + cache_mode: writeback + id: root0 + name: root0 + type: bcache + + # LVM volume group for root + - devices: + - root0 + id: vg0 + name: vg0 + type: lvm_volgroup + + # LVM partition for root + - id: vg0-root + name: root + type: lvm_partition + size: 20G + volgroup: vg0 + + # 
root format + - volume: vg0-root + id: vg0-root-format + label: root + fstype: ext4 + type: format + + # root mount + - device: vg0-root-format + id: vg0-root-mount + path: / + type: mount + + # LVM partition for home + - id: vg0-home + name: home + type: lvm_partition + size: 1G + volgroup: vg0 + + # home format + - volume: vg0-home + id: vg0-home-format + label: home + fstype: ext4 + type: format + + # home mount + - device: vg0-home-format + id: vg0-home-mount + path: /home + type: mount + + # LVM partition for tmp + - id: vg0-tmp + name: tmp + type: lvm_partition + size: 2G + volgroup: vg0 + + # tmp format + - volume: vg0-tmp + id: vg0-tmp-format + label: tmp + fstype: ext4 + type: format + + # tmp mount + - device: vg0-tmp-format + id: vg0-tmp-mount + path: /tmp + type: mount + + # LVM partition for var + - id: vg0-var + name: var + type: lvm_partition + size: 5T + volgroup: vg0 + + # var format + - volume: vg0-var + id: vg0-var-format + label: var + fstype: ext4 + type: format + + # var mount + - device: vg0-var-format + id: vg0-var-mount + path: /var + type: mount + + # LVM partition for var/log + - id: vg0-var-log + name: var-log + type: lvm_partition + size: 20G + volgroup: vg0 + + # var/log format + - volume: vg0-var-log + id: vg0-var-log-format + label: var-log + fstype: ext4 + type: format + + # var/log mount + - device: vg0-var-log-format + id: vg0-var-log-mount + path: /var/log + type: mount + + # LVM partition for var/tmp + - id: vg0-var-tmp + name: var-tmp + type: lvm_partition + size: 2G + volgroup: vg0 + + # var/tmp format + - volume: vg0-var-tmp + id: vg0-var-tmp-format + label: var-tmp + fstype: ext4 + type: format + + # var/tmp mount + - device: vg0-var-tmp-format + id: vg0-var-tmp-mount + path: /var/tmp + type: mount + + # LVM partition for var/log/audit + - id: vg0-var-log-audit + name: var-log-audit + type: lvm_partition + size: 2G + volgroup: vg0 + + # var/log/audit format + - volume: vg0-var-log-audit + id: vg0-var-log-audit-format + label: var-log-audit + fstype: ext4 + type: format + + # var/log/audit mount + - device: vg0-var-log-audit-format + id: vg0-var-log-audit-mount + path: /var/log/audit + type: mount + + # LVM partition for SWAP + - id: vg0-swap + name: swap + type: lvm_partition + size: 8G + volgroup: vg0 + + # SWAP format + - volume: vg0-swap + id: vg0-swap-format + label: swap + fstype: swap + type: format + + # SWAP mount + - device: vg0-swap-format + id: vg0-swap-mount + path: '' + type: mount + + nics: &common-nics + # ens3 NIC + - id: ens3 + name: ens3 + nic: 0 + type: physical + vlan_id: 300 + subnets: + oam: + mode: static + ip_addresses: + as1-maas-node-01: 10.0.1.101 + as2-maas-node-01: 10.0.1.102 + as3-maas-node-01: 10.0.1.103 + as1-maas-node-02: 10.0.1.111 + as2-maas-node-02: 10.0.1.112 + as3-maas-node-02: 10.0.1.113 + as1-maas-node-03: 10.0.1.121 + as2-maas-node-03: 10.0.1.122 + as3-maas-node-03: 10.0.1.123 + as1-maas-node-04: 10.0.1.131 + as1-maas-node-05: 10.0.1.132 + as2-maas-node-04: 10.0.1.133 + as2-maas-node-05: 10.0.1.134 + as3-maas-node-04: 10.0.1.135 + as3-maas-node-05: 10.0.1.136 + fabric: default + + # ens4 NIC + - id: ens4 + name: ens4 + nic: 1 + type: physical + vlan_id: 301 + subnets: [ceph-access] + fabric: default + + # ens5 NIC + - id: ens5 + name: ens5 + nic: 2 + type: physical + vlan_id: 302 + subnets: [ceph-replica] + fabric: default + + # ens6 NIC + - id: ens6 + name: ens6 + nic: 3 + type: physical + vlan_id: 303 + subnets: [overlay] + fabric: default + + # ens7 NIC + - id: ens7 + name: ens7 + nic: 4 + type: physical + 
vlan_id: 304 + subnets: [admin] + fabric: default + + # ens8 NIC + - id: ens8 + name: ens8 + nic: 5 + type: physical + vlan_id: 305 + subnets: [internal] + fabric: default + + # ens9 NIC + - id: ens9 + name: ens9 + nic: 6 + type: physical + vlan_id: 1 + subnets: [external] + fabric: default + + lma: + disks: *common-disks + nics: *common-nics + + landscape: + disks: *common-disks + nics: *common-nics + + compute: + disks: &compute-disks + # RAID 1 OS disk + - disk: 0 + id: os0 + ptable: GPT + type: disk + + # EFI partition + - device: os0 + id: os0-efi + number: 1 + size: 512M + type: partition + + # EFI format + - volume: os0-efi + id: os0-efi-format + label: EFI + fstype: fat32 + type: format + + # EFI mount + - device: os0-efi-format + id: os0-efi-mount + path: /boot/efi + type: mount + + # Boot partition + - device: os0 + id: os0-boot + number: 2 + size: 2G + type: partition + + # Boot format + - volume: os0-boot + id: os0-boot-format + label: BOOT + fstype: ext4 + type: format + + # Boot mount + - device: os0-boot-format + id: os0-boot-mount + path: /boot + type: mount + + # New section: compute nodes keep root directly on the OS disk + # Root partition + - device: os0 + id: os0-root + number: 3 + size: 53G + type: partition + + # Copied from the control nodes: LVM volume group for root + - devices: + - os0-root + id: vg0 + name: vg0 + type: lvm_volgroup + + # LVM partition for root + - id: vg0-root + name: root + type: lvm_partition + size: 45G + volgroup: vg0 + + # root format + - volume: vg0-root + id: vg0-root-format + label: root + fstype: ext4 + type: format + + # root mount + - device: vg0-root-format + id: vg0-root-mount + path: / + type: mount + + # LVM partition for SWAP + - id: vg0-swap + name: swap + type: lvm_partition + size: 8G + volgroup: vg0 + + # SWAP format + - volume: vg0-swap + id: vg0-swap-format + label: swap + fstype: swap + type: format + + # SWAP mount + - device: vg0-swap-format + id: vg0-swap-mount + path: '' + type: mount + + nics: *common-nics diff --git a/config/bundle.yaml b/config/bundle.yaml new file mode 100644 index 0000000..cbe8243 --- /dev/null +++ b/config/bundle.yaml @@ -0,0 +1,1503 @@ +# Copyright (c) 2017-2018 Canonical USA Inc. All rights reserved. +# +# Foundation HyperConverged +# +series: bionic +variables: + # https://wiki.ubuntu.com/OpenStack/CloudArchive + # packages for an LTS release come in a form of SRUs + # do not use cloud: for an LTS version as + # installation hooks will fail. Example: + openstack-origin: &openstack-origin distro + + openstack-region: &openstack-region RegionOne + + # !> Important: the defaults below assume about 2 service + # containers per host; adapt appropriately.
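+  # NOTE: worker-multiplier, mysql-connections and mysql-tuning-level are
+  # referenced by several applications further down but were never defined,
+  # so the bundle fails to load as written. The values below are placeholder
+  # assumptions only (a sketch); size them for the actual hardware before
+  # deploying.
+  worker-multiplier: &worker-multiplier 0.25
+  mysql-connections: &mysql-connections 2000
+  mysql-tuning-level: &mysql-tuning-level safest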
+ reserved-host-memory: &reserved-host-memory 512 + #ram-allocation-ratio: &ram-allocation-ratio 0.999999 # XXX bug 1613839 + ram-allocation-ratio: &ram-allocation-ratio 1.0 # now fixed + # changed from 4.0 as used on borehamwood 1 and adastral to 2.0 for borehamwood (003) env + cpu-allocation-ratio: &cpu-allocation-ratio 2.0 + + + # This is Management network, unrelated to OpenStack and other applications + # OAM - Operations, Administration and Maintenance + oam-space: &oam-space oam + + # This is OpenStack Admin network; for adminURL endpoints + admin-space: &admin-space oam + + # This is OpenStack Public network; for publicURL endpoints + #public-space: &public-space external + public-space: &public-space oam + + # This is OpenStack Internal network; for internalURL endpoints + internal-space: &internal-space oam + + # CEPH configuration + # CEPH access network + ceph-public-space: &ceph-public-space ceph-access + + # CEPH replication network + ceph-cluster-space: &ceph-cluster-space ceph-replica + + overlay-space: &overlay-space overlay + + # Workaround for 'only one default binding supported' + oam-space-constr: &oam-space-constr spaces=oam + ceph-access-constr: &ceph-access-constr spaces=ceph-access + combi-access-constr: &combi-access-constr spaces=ceph-access,oam + + # CEPH OSD and journal devices; temporary workaround for #1674148 + osd-devices: &osd-devices "/dev/sdb /dev/sdc" + + customize-failure-domain: &customize-failure-domain True + + # Expected OSD count is total number of OSD disks that will be part of Ceph cluster. + # Never set this number higher or much lower than the real number. 10-20% less than + # actual number is acceptable + expected-osd-count: &expected-osd-count 12 + expected-mon-count: &expected-mon-count 3 + + nagios-context: &nagios-context arif-nc01 + + # Various VIPs + aodh-vip: &aodh-vip "10.0.1.211" + cinder-vip: &cinder-vip "10.0.1.212" + dashboard-vip: &dashboard-vip "10.0.1.213" + glance-vip: &glance-vip "10.0.1.214" + heat-vip: &heat-vip "10.0.1.215" + keystone-vip: &keystone-vip "10.0.1.216" + mysql-vip: &mysql-vip "10.0.1.217" + neutron-api-vip: &neutron-api-vip "10.0.1.218" + nova-cc-vip: &nova-cc-vip "10.0.1.219" + gnocchi-vip: &gnocchi-vip "10.0.1.220" + contrail-vip: &contrail-api-vip "10.0.1.221" + vault-vip: &vault-vip "10.0.1.222" + + # NTP configuration + ntp-source: &ntp-source "192.168.1.11" + + # Add policy-routing to the external network + external-network-cidr: &external-network-cidr 192.168.1.0/24 + external-network-gateway: &external-network-gateway 192.168.1.249 + + # After bundle has been deployed, log in to Landscape server and create + # an account. In the account settings, set the Registration key and then + # configure landscape-client to use that registration-key: + # juju config landscape-client registration-key=$your_registration_key + + # Encryption At Rest + # removed for borehamwood 003 design as all storage shared and distributed on ceph + # ephemeral-device: &ephemeral-device /dev/disk/by-dname/ephemeral + + # DNS configuration + # This configuration for overlay networks. Usually domain should be set to something + # like "openstack.customername.lan." (notice . at the end), while cidr is for PTR + # records, so in most cases 24 is just fine (16 is another option) + # dns-domain: &dns-domain "openstack.customername.lan." + # dns-cidr: &dns-cidr 24 + # DNS server needs to be the same in the different charms to avoid conflict. 
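+  # For example, the value can be kept consistent after deployment with
+  # (a sketch; both charms consume the same anchor defined just below):
+  #   juju config neutron-gateway dns-servers='192.168.1.13'
+  #   juju config neutron-openvswitch dns-servers='192.168.1.13'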
+ dns-servers: &dns-servers '192.168.1.13' + + # Mappings to provide connectivity to a physical network, used by neutron-gateway + # and possibly neutron-openvswitch, therefore do not configure an IP address for + # this port in MAAS. + data-port: &data-port "br-data:ens9" + bridge-mappings: &bridge-mappings 'physnet1:br-data' + +machines: + # Baremetals + # Control Nodes + "100": + constraints: tags=control + "101": + constraints: tags=control + "102": + constraints: tags=control + # LMA Nodes + "200": + constraints: tags=compute + "201": + constraints: tags=compute + "202": + constraints: tags=compute + # Landscape Nodes + "300": + constraints: tags=compute + "301": + constraints: tags=compute + "302": + constraints: tags=compute + # Contrail Nodes + "400": + constraints: tags=control + "401": + constraints: tags=control + "402": + constraints: tags=control +# "500": +# constraints: tags=compute +# "501": +# constraints: tags=compute +# "502": +# constraints: tags=compute +# "503": +# constraints: tags=compute +# "504": +# constraints: tags=compute +# "505": +# constraints: tags=compute +# "506": +# constraints: tags=compute +# "507": +# constraints: tags=compute +# "508": +# constraints: tags=compute + + # hyper-converged nova/ceph Nodes + "1000": + constraints: tags=compute + "1001": + constraints: tags=compute + "1002": + constraints: tags=compute + "1003": + constraints: tags=compute + "1004": + constraints: tags=compute + "1005": + constraints: tags=compute + +applications: + # HAcluster + hacluster-aodh: + charm: cs:hacluster + hacluster-cinder: + charm: cs:hacluster + hacluster-glance: + charm: cs:hacluster + hacluster-gnocchi: + charm: cs:hacluster + hacluster-horizon: + charm: cs:hacluster + hacluster-keystone: + charm: cs:hacluster + hacluster-neutron: + charm: cs:hacluster + hacluster-nova: + charm: cs:hacluster + hacluster-mysql: + charm: cs:hacluster + hacluster-heat: + charm: cs:hacluster + hacluster-vault: + charm: cs:hacluster + + # CPU governor applications + sysconfig-compute: + charm: cs:sysconfig + options: + enable-iommu: false + governor: "performance" + enable-pti: true + update-grub: true +# sysconfig-storage: +# charm: cs:sysconfig +# options: +# enable-iommu: true +# governor: "performance" +# enable-pti: true +# update-grub: true + sysconfig-control: + charm: cs:sysconfig + options: + enable-iommu: true + governor: "performance" + enable-pti: true + update-grub: true + + # bcache-tuning + #bcache-tuning: + # charm: cs:bcache-tuning + + # Ceph + ceph-mon: + charm: cs:ceph-mon + num_units: 3 + bindings: + "": *oam-space + public: *ceph-public-space + osd: *ceph-public-space + client: *ceph-public-space + admin: *ceph-public-space + cluster: *ceph-cluster-space + options: + expected-osd-count: *expected-osd-count + source: *openstack-origin + monitor-count: *expected-mon-count + customize-failure-domain: *customize-failure-domain + to: + - lxd:100 + - lxd:101 + - lxd:102 +# ceph-mon2: +# charm: cs:ceph-mon +# num_units: 3 +# bindings: +# "": *oam-space +# public: *ceph-public-space +# osd: *ceph-public-space +# client: *ceph-public-space +# admin: *ceph-public-space +# cluster: *ceph-cluster-space +# options: +# expected-osd-count: *expected-osd-count +# source: *openstack-origin +# monitor-count: *expected-mon-count +# customize-failure-domain: *customize-failure-domain +# to: +# - lxd:100 +# - lxd:101 +# - lxd:102 + ceph-osd: + charm: cs:ceph-osd + num_units: 6 + bindings: + "": *oam-space + public: *ceph-public-space + cluster: *ceph-cluster-space + 
secrets-storage: *internal-space + mon: *ceph-public-space + options: + osd-devices: *osd-devices + source: *openstack-origin + customize-failure-domain: *customize-failure-domain + autotune: false + aa-profile-mode: complain + bluestore: true + osd-encrypt: True + osd-encrypt-keymanager: vault + to: + - '1000' + - '1001' + - '1002' + - '1003' + - '1004' + - '1005' +# ceph-osd2: +# charm: cs:ceph-osd +# num_units: 6 +# bindings: +# "": *oam-space +# public: *ceph-public-space +# cluster: *ceph-cluster-space +# secrets-storage: *internal-space +# mon: *ceph-public-space +# options: +# osd-devices: *osd-devices +# source: *openstack-origin +# customize-failure-domain: *customize-failure-domain +# autotune: false +# aa-profile-mode: complain +# bluestore: true +# osd-encrypt: True +# osd-encrypt-keymanager: vault +# to: +# - '1000' +# - '1001' +# - '1002' +# - '1003' +# - '1004' +# - '1005' + # OpenStack + aodh: + charm: cs:aodh + num_units: 3 + bindings: + "": *oam-space + public: *public-space + admin: *admin-space + internal: *internal-space + shared-db: *internal-space + options: + worker-multiplier: *worker-multiplier + openstack-origin: *openstack-origin + region: *openstack-region + vip: *aodh-vip + use-internal-endpoints: True + to: + - lxd:200 + - lxd:201 + - lxd:202 + gnocchi: + charm: cs:gnocchi + num_units: 3 + bindings: + "": *oam-space + public: *public-space + admin: *admin-space + internal: *internal-space + shared-db: *internal-space + storage-ceph: *ceph-public-space + coordinator-memcached: *internal-space + options: + worker-multiplier: *worker-multiplier + openstack-origin: *openstack-origin + region: *openstack-region + vip: *gnocchi-vip + use-internal-endpoints: True + to: + - lxd:200 + - lxd:201 + - lxd:202 + cinder: + charm: cs:cinder + num_units: 3 + constraints: *combi-access-constr + bindings: + "": *oam-space + public: *public-space + admin: *admin-space + internal: *internal-space + shared-db: *internal-space + options: + worker-multiplier: *worker-multiplier + openstack-origin: *openstack-origin + block-device: None + glance-api-version: 2 + vip: *cinder-vip + use-internal-endpoints: True + region: *openstack-region + enabled-services: "api,scheduler,volume" + to: + - lxd:100 + - lxd:101 + - lxd:102 +# cinder2: +# charm: cs:cinder +# num_units: 3 +# constraints: *combi-access-constr +# bindings: +# "": *oam-space +# public: *public-space +# admin: *admin-space +# internal: *internal-space +# shared-db: *internal-space +# options: +# worker-multiplier: *worker-multiplier +# openstack-origin: *openstack-origin +# block-device: None +# glance-api-version: 2 +# vip: *cinder-vip +# use-internal-endpoints: True +# region: *openstack-region +# enabled-services: "backup" +# to: +# - as1-maas-node-07 +# - as2-maas-node-07 +# - as3-maas-node-07 +# cinder3: +# charm: cs:cinder +# num_units: 3 +# constraints: *combi-access-constr +# bindings: +# "": *oam-space +# public: *public-space +# admin: *admin-space +# internal: *internal-space +# shared-db: *internal-space +# options: +# worker-multiplier: *worker-multiplier +# openstack-origin: *openstack-origin +# block-device: None +# glance-api-version: 2 +# vip: *cinder-vip +# use-internal-endpoints: True +# region: *openstack-region +# enabled-services: "backup" +# to: +# - lxd:200 +# - lxd:201 +# - lxd:202 +# cinder-backup: +# charm: cs:cinder-backup + cinder-ceph: + charm: cs:cinder-ceph + options: + restrict-ceph-pools: False + glance: + charm: cs:glance + constraints: *combi-access-constr + bindings: + "": *oam-space + 
public: *public-space + admin: *admin-space + internal: *internal-space + shared-db: *internal-space + options: + worker-multiplier: *worker-multiplier + openstack-origin: *openstack-origin + vip: *glance-vip + use-internal-endpoints: True + restrict-ceph-pools: False + region: *openstack-region + num_units: 3 + to: + - lxd:100 + - lxd:101 + - lxd:102 + keystone: + charm: cs:keystone + num_units: 3 + bindings: + "": *oam-space + public: *public-space + admin: *admin-space + internal: *internal-space + shared-db: *internal-space + options: + worker-multiplier: *worker-multiplier + openstack-origin: *openstack-origin + vip: *keystone-vip + region: *openstack-region + preferred-api-version: 3 + token-provider: 'fernet' + to: + - lxd:100 + - lxd:101 + - lxd:102 + mysql: + charm: cs:percona-cluster + num_units: 3 + bindings: + "": *oam-space + cluster: *internal-space + shared-db: *internal-space + ha: *internal-space + db: *internal-space + db-admin: *internal-space + options: + source: *openstack-origin + #innodb-buffer-pool-size: 16G + vip: *mysql-vip + wait-timeout: 3600 + min-cluster-size: 3 + enable-binlogs: False + performance-schema: True + max-connections: *mysql-connections + tuning-level: *mysql-tuning-level + wsrep-slave-threads: 48 + to: + - lxd:100 + - lxd:101 + - lxd:102 + neutron-api: + charm: cs:neutron-api + num_units: 3 + bindings: + "": *oam-space + public: *public-space + admin: *admin-space + internal: *internal-space + shared-db: *internal-space + #neutron-plugin-api-subordinate: *overlay-space + options: + worker-multiplier: *worker-multiplier + openstack-origin: *openstack-origin + region: *openstack-region + neutron-security-groups: True + #overlay-network-type: vxlan gre + overlay-network-type: 'vxlan' + use-internal-endpoints: True + vip: *neutron-api-vip + enable-l3ha: True + dhcp-agents-per-network: 2 + enable-ml2-port-security: True + default-tenant-network-type: vxlan + l2-population: True + #global-physnet-mtu: 9000 + to: + - lxd:100 + - lxd:101 + - lxd:102 + neutron-gateway: + charm: cs:neutron-gateway + num_units: 3 + bindings: + "": *oam-space + data: *overlay-space + options: + worker-multiplier: *worker-multiplier + openstack-origin: *openstack-origin + bridge-mappings: *bridge-mappings + data-port: *data-port + aa-profile-mode: enforce + dns-servers: *dns-servers + customize-failure-domain: *customize-failure-domain + to: + - 100 + - 101 + - 102 + neutron-openvswitch: + charm: cs:neutron-openvswitch + num_units: 0 + bindings: + "": *oam-space + data: *overlay-space + options: + worker-multiplier: *worker-multiplier + bridge-mappings: *bridge-mappings + prevent-arp-spoofing: True + firewall-driver: openvswitch + dns-servers: *dns-servers + data-port: *data-port + nova-cloud-controller: + charm: cs:nova-cloud-controller + num_units: 3 + bindings: + "": *oam-space + public: *public-space + admin: *admin-space + internal: *internal-space + shared-db: *internal-space + memcache: *internal-space + options: + worker-multiplier: *worker-multiplier + openstack-origin: *openstack-origin + network-manager: Neutron + region: *openstack-region + vip: *nova-cc-vip + console-access-protocol: novnc + console-proxy-ip: local + use-internal-endpoints: True + ram-allocation-ratio: *ram-allocation-ratio + cpu-allocation-ratio: *cpu-allocation-ratio + to: + - lxd:100 + - lxd:101 + - lxd:102 + nova-compute: + charm: cs:nova-compute + num_units: 6 + bindings: + "": *oam-space + internal: *internal-space + options: + openstack-origin: *openstack-origin + enable-live-migration: 
True + enable-resize: True + migration-auth-type: ssh + use-internal-endpoints: True + libvirt-image-backend: rbd + restrict-ceph-pools: False + aa-profile-mode: enforce + virt-type: kvm + customize-failure-domain: *customize-failure-domain + reserved-host-memory: *reserved-host-memory + #cpu-mode: custom + #cpu-model: 'Skylake-Server-IBRS' + to: + - 1000 + - 1001 + - 1002 + - 1003 + - 1004 + - 1005 + ntp: + charm: cs:ntp + options: + source: *ntp-source + pools: '' + openstack-dashboard: + charm: cs:openstack-dashboard + num_units: 3 + constraints: *oam-space-constr + bindings: + "": *public-space + shared-db: *internal-space + options: + openstack-origin: *openstack-origin + webroot: "/" + secret: "encryptcookieswithme" + vip: *dashboard-vip + neutron-network-l3ha: True + neutron-network-lb: True + neutron-network-firewall: False + cinder-backup: False + password-retrieve: True + endpoint-type: 'publicURL' + to: + - lxd:100 + - lxd:101 + - lxd:102 + rabbitmq-server: + charm: cs:rabbitmq-server + bindings: + "": *oam-space + amqp: *internal-space + cluster: *internal-space + options: + source: *openstack-origin + min-cluster-size: 3 + cluster-partition-handling: ignore + num_units: 3 + to: + - lxd:100 + - lxd:101 + - lxd:102 + heat: + charm: cs:heat + num_units: 3 + bindings: + "": *oam-space + public: *public-space + admin: *admin-space + internal: *internal-space + shared-db: *internal-space + #heat-plugin-subordinate: *overlay-space + options: + worker-multiplier: *worker-multiplier + openstack-origin: *openstack-origin + region: *openstack-region + vip: *heat-vip + use-internal-endpoints: True + config-flags: "max_nested_stack_depth=20" + to: + - lxd:100 + - lxd:101 + - lxd:102 + memcached: + charm: cs:memcached + num_units: 3 + constraints: *oam-space-constr + bindings: + "": *internal-space + cache: *internal-space + options: + allow-ufw-ip6-softfail: True + to: + - lxd:100 + - lxd:101 + - lxd:102 + +# LMA stack applications + landscape-server: + charm: cs:landscape-server + bindings: + "": *oam-space + options: + install_sources: |- + - 'deb http://ppa.launchpad.net/landscape/19.10/ubuntu bionic main' + install_keys: |- + - | + -----BEGIN PGP PUBLIC KEY BLOCK----- + Version: SKS 1.1.6 + Comment: Hostname: keyserver.ubuntu.com + mI0ESXN/egEEAOgRYISU9dnQm4BB5ZEEwKT+NKUDNd/DhMYdtBMw9Yk7S5cyoqpbtwoPJVzK + AXxq+ng5e3yYypSv98pLMr5UF09FGaeyGlD4s1uaVFWkFCO4jsTg7pWIY6qzO/jMxB5+Yu/G + 0GjWQMNKxFk0oHMa0PhNBZtdPacVz65mOVmCsh/lABEBAAG0G0xhdW5jaHBhZCBQUEEgZm9y + IExhbmRzY2FwZYi2BBMBAgAgBQJJc396AhsDBgsJCAcDAgQVAggDBBYCAwECHgECF4AACgkQ + boWobkZStOb+rwP+ONKUWeX+MTIPqGWkknBPV7jm8nyyIUojC4IhS+9YR6GYnn0hMABSkEHm + IV73feKmrT2GESYI1UdYeKiOkWsPN/JyBk+eTvKet0qsw5TluqiHSW+LEi/+zUyrS3dDMX3o + yaLgYa+UkjIyxnaKLkQuCiS+D+fYwnJulIkhaKObtdE= + =UwRd + -----END PGP PUBLIC KEY BLOCK----- + license-file: include-base64://../secrets/ldslicense.txt + #root-url: http://landscape.example.com/ + num_units: 3 + to: + - 300 + - 301 + - 302 + landscape-rabbitmq-server: + charm: cs:rabbitmq-server + bindings: + "": *oam-space + cluster: *oam-space + amqp: *oam-space + num_units: 3 + options: + source: *openstack-origin + min-cluster-size: 3 + cluster-partition-handling: ignore # TODO: GABOR discuss with BT + to: + - lxd:300 + - lxd:301 + - lxd:302 + landscape-postgresql: + charm: cs:postgresql + bindings: + "": *oam-space + options: + extra_packages: python-apt postgresql-contrib postgresql-.*-debversion postgresql-plpython-.* + max_connections: 500 + max_prepared_transactions: 500 + num_units: 2 + to: + - lxd:300 + - 
lxd:301 + landscape-haproxy: + charm: cs:haproxy + bindings: + "": *oam-space + options: + default_timeouts: "queue 60000, connect 5000, client 120000, server 120000" + services: "" + source: backports + ssl_cert: SELFSIGNED + global_default_bind_options: "no-tlsv10" + num_units: 1 + to: + - lxd:302 + graylog: + charm: cs:graylog + series: bionic + bindings: + "": *oam-space + num_units: 1 + options: + jvm_heap_size: '1G' + rest_transport_uri: http://graylog.example.com:9001 + index_rotation_period: PT3H + to: + - 200 + graylog-mongodb: + charm: cs:mongodb + bindings: + "": *oam-space + num_units: 1 + options: + nagios_context: *nagios-context + to: + - lxd:200 + elasticsearch: + charm: cs:elasticsearch + bindings: + "": *oam-space + num_units: 2 + options: + firewall_enabled: False + es-heap-size: 2 + gpg-key: | + -----BEGIN PGP PUBLIC KEY BLOCK----- + Version: SKS 1.1.6 + Comment: Hostname: keyserver.ubuntu.com + + mQENBFI3HsoBCADXDtbNJnxbPqB1vDNtCsqhe49vFYsZN9IOZsZXgp7aHjh6CJBDA+bGFOwy + hbd7at35jQjWAw1O3cfYsKAmFy+Ar3LHCMkV3oZspJACTIgCrwnkic/9CUliQe324qvObU2Q + RtP4Fl0zWcfb/S8UYzWXWIFuJqMvE9MaRY1bwUBvzoqavLGZj3SF1SPO+TB5QrHkrQHBsmX+ + Jda6d4Ylt8/t6CvMwgQNlrlzIO9WT+YN6zS+sqHd1YK/aY5qhoLNhp9G/HxhcSVCkLq8SStj + 1ZZ1S9juBPoXV1ZWNbxFNGwOh/NYGldD2kmBf3YgCqeLzHahsAEpvAm8TBa7Q9W21C8vABEB + AAG0RUVsYXN0aWNzZWFyY2ggKEVsYXN0aWNzZWFyY2ggU2lnbmluZyBLZXkpIDxkZXZfb3Bz + QGVsYXN0aWNzZWFyY2gub3JnPokBOAQTAQIAIgUCUjceygIbAwYLCQgHAwIGFQgCCQoLBBYC + AwECHgECF4AACgkQ0n1mbNiOQrRzjAgAlTUQ1mgo3nK6BGXbj4XAJvuZDG0HILiUt+pPnz75 + nsf0NWhqR4yGFlmpuctgCmTD+HzYtV9fp9qW/bwVuJCNtKXk3sdzYABY+Yl0Cez/7C2GuGCO + lbn0luCNT9BxJnh4mC9h/cKI3y5jvZ7wavwe41teqG14V+EoFSn3NPKmTxcDTFrV7SmVPxCB + cQze00cJhprKxkuZMPPVqpBS+JfDQtzUQD/LSFfhHj9eD+Xe8d7sw+XvxB2aN4gnTlRzjL1n + TRp0h2/IOGkqYfIG9rWmSLNlxhB2t+c0RsjdGM4/eRlPWylFbVMc5pmDpItrkWSnzBfkmXL3 + vO2X3WvwmSFiQbkBDQRSNx7KAQgA5JUlzcMW5/cuyZR8alSacKqhSbvoSqqbzHKcUQZmlzNM + KGTABFG1yRx9r+wa/fvqP6OTRzRDvVS/cycws8YX7Ddum7x8uI95b9ye1/Xy5noPEm8cD+hp + lnpU+PBQZJ5XJ2I+1l9Nixx47wPGXeClLqcdn0ayd+v+Rwf3/XUJrvccG2YZUiQ4jWZkoxsA + 07xx7Bj+Lt8/FKG7sHRFvePFU0ZS6JFx9GJqjSBbHRRkam+4emW3uWgVfZxuwcUCn1ayNgRt + KiFv9jQrg2TIWEvzYx9tywTCxc+FFMWAlbCzi+m4WD+QUWWfDQ009U/WM0ks0KwwEwSk/UDu + ToxGnKU2dQARAQABiQEfBBgBAgAJBQJSNx7KAhsMAAoJENJ9ZmzYjkK0c3MIAIE9hAR20mqJ + WLcsxLtrRs6uNF1VrpB+4n/55QU7oxA1iVBO6IFu4qgsF12JTavnJ5MLaETlggXY+zDef9sy + TPXoQctpzcaNVDmedwo1SiL03uMoblOvWpMR/Y0j6rm7IgrMWUDXDPvoPGjMl2q1iTeyHkMZ + EyUJ8SKsaHh4jV9wp9KmC8C+9CwMukL7vM5w8cgvJoAwsp3Fn59AxWthN3XJYcnMfStkIuWg + R7U2r+a210W6vnUxU4oN0PmMcursYPyeV0NX/KQeUeNMwGTFB6QHS/anRaGQewijkrYYoTNt + fllxIu9XYmiBERQ/qPDlGRlOgVTd9xUfHFkzB52c70E= + =92oX + -----END PGP PUBLIC KEY BLOCK----- + to: + - 201 + - 202 + filebeat: + charm: cs:filebeat + options: + logpath: "/var/log/*.log /var/log/*/*.log /var/log/syslog" + install_keys: |- + - | + -----BEGIN PGP PUBLIC KEY BLOCK----- + Version: SKS 1.1.6 + Comment: Hostname: keyserver.ubuntu.com + mQENBFI3HsoBCADXDtbNJnxbPqB1vDNtCsqhe49vFYsZN9IOZsZXgp7aHjh6CJBDA+bGFOwy + hbd7at35jQjWAw1O3cfYsKAmFy+Ar3LHCMkV3oZspJACTIgCrwnkic/9CUliQe324qvObU2Q + RtP4Fl0zWcfb/S8UYzWXWIFuJqMvE9MaRY1bwUBvzoqavLGZj3SF1SPO+TB5QrHkrQHBsmX+ + Jda6d4Ylt8/t6CvMwgQNlrlzIO9WT+YN6zS+sqHd1YK/aY5qhoLNhp9G/HxhcSVCkLq8SStj + 1ZZ1S9juBPoXV1ZWNbxFNGwOh/NYGldD2kmBf3YgCqeLzHahsAEpvAm8TBa7Q9W21C8vABEB + AAG0RUVsYXN0aWNzZWFyY2ggKEVsYXN0aWNzZWFyY2ggU2lnbmluZyBLZXkpIDxkZXZfb3Bz + QGVsYXN0aWNzZWFyY2gub3JnPokBOAQTAQIAIgUCUjceygIbAwYLCQgHAwIGFQgCCQoLBBYC + AwECHgECF4AACgkQ0n1mbNiOQrRzjAgAlTUQ1mgo3nK6BGXbj4XAJvuZDG0HILiUt+pPnz75 + 
nsf0NWhqR4yGFlmpuctgCmTD+HzYtV9fp9qW/bwVuJCNtKXk3sdzYABY+Yl0Cez/7C2GuGCO + lbn0luCNT9BxJnh4mC9h/cKI3y5jvZ7wavwe41teqG14V+EoFSn3NPKmTxcDTFrV7SmVPxCB + cQze00cJhprKxkuZMPPVqpBS+JfDQtzUQD/LSFfhHj9eD+Xe8d7sw+XvxB2aN4gnTlRzjL1n + TRp0h2/IOGkqYfIG9rWmSLNlxhB2t+c0RsjdGM4/eRlPWylFbVMc5pmDpItrkWSnzBfkmXL3 + vO2X3WvwmSFiQbkBDQRSNx7KAQgA5JUlzcMW5/cuyZR8alSacKqhSbvoSqqbzHKcUQZmlzNM + KGTABFG1yRx9r+wa/fvqP6OTRzRDvVS/cycws8YX7Ddum7x8uI95b9ye1/Xy5noPEm8cD+hp + lnpU+PBQZJ5XJ2I+1l9Nixx47wPGXeClLqcdn0ayd+v+Rwf3/XUJrvccG2YZUiQ4jWZkoxsA + 07xx7Bj+Lt8/FKG7sHRFvePFU0ZS6JFx9GJqjSBbHRRkam+4emW3uWgVfZxuwcUCn1ayNgRt + KiFv9jQrg2TIWEvzYx9tywTCxc+FFMWAlbCzi+m4WD+QUWWfDQ009U/WM0ks0KwwEwSk/UDu + ToxGnKU2dQARAQABiQEfBBgBAgAJBQJSNx7KAhsMAAoJENJ9ZmzYjkK0c3MIAIE9hAR20mqJ + WLcsxLtrRs6uNF1VrpB+4n/55QU7oxA1iVBO6IFu4qgsF12JTavnJ5MLaETlggXY+zDef9sy + TPXoQctpzcaNVDmedwo1SiL03uMoblOvWpMR/Y0j6rm7IgrMWUDXDPvoPGjMl2q1iTeyHkMZ + EyUJ8SKsaHh4jV9wp9KmC8C+9CwMukL7vM5w8cgvJoAwsp3Fn59AxWthN3XJYcnMfStkIuWg + R7U2r+a210W6vnUxU4oN0PmMcursYPyeV0NX/KQeUeNMwGTFB6QHS/anRaGQewijkrYYoTNt + fllxIu9XYmiBERQ/qPDlGRlOgVTd9xUfHFkzB52c70E= + =92oX + -----END PGP PUBLIC KEY BLOCK----- + install_sources: | + - 'deb https://artifacts.elastic.co/packages/5.x/apt stable main' + nagios: + charm: cs:nagios + series: bionic + bindings: + "": *oam-space + num_units: 1 + options: + enable_livestatus: true + check_timeout: 50 + to: + - lxd:200 + openstack-service-checks: + charm: cs:~canonical-bootstack/openstack-service-checks + constraints: *oam-space-constr + bindings: + "": *public-space + identity-credentials: *internal-space + num_units: 1 + to: + - lxd:200 + nrpe-host: + charm: cs:nrpe + bindings: + monitors: *oam-space + options: + nagios_hostname_type: "host" + nagios_host_context: *nagios-context + xfs_errors: "30" + netlinks: | + - bond0 mtu:1500 speed:1000 + - bond1 mtu:9000 speed:50000 + - eno1 mtu:1500 speed:1000 + - eno2 mtu:1500 speed:1000 + - enp25s0f0 mtu:9000 speed:25000 + - enp25s0f1 mtu:9000 speed:25000 + nrpe-container: + charm: cs:nrpe + bindings: + monitors: *oam-space + options: + nagios_hostname_type: unit + nagios_host_context: *nagios-context + disk_root: '' + load: '' + swap: '' + swap_activity: '' + mem: '' + landscape-client: + charm: cs:landscape-client + options: + account-name: "standalone" + origin: | + deb http://ppa.launchpad.net/landscape/19.10/ubuntu bionic main|-----BEGIN PGP PUBLIC KEY BLOCK----- + Version: SKS 1.1.6 + Comment: Hostname: keyserver.ubuntu.com + mI0ESXN/egEEAOgRYISU9dnQm4BB5ZEEwKT+NKUDNd/DhMYdtBMw9Yk7S5cyoqpbtwoPJVzK + AXxq+ng5e3yYypSv98pLMr5UF09FGaeyGlD4s1uaVFWkFCO4jsTg7pWIY6qzO/jMxB5+Yu/G + 0GjWQMNKxFk0oHMa0PhNBZtdPacVz65mOVmCsh/lABEBAAG0G0xhdW5jaHBhZCBQUEEgZm9y + IExhbmRzY2FwZYi2BBMBAgAgBQJJc396AhsDBgsJCAcDAgQVAggDBBYCAwECHgECF4AACgkQ + boWobkZStOb+rwP+ONKUWeX+MTIPqGWkknBPV7jm8nyyIUojC4IhS+9YR6GYnn0hMABSkEHm + IV73feKmrT2GESYI1UdYeKiOkWsPN/JyBk+eTvKet0qsw5TluqiHSW+LEi/+zUyrS3dDMX3o + yaLgYa+UkjIyxnaKLkQuCiS+D+fYwnJulIkhaKObtdE= + =UwRd + -----END PGP PUBLIC KEY BLOCK----- + #registration-key: include-file://../secrets/landscape-registration.txt + disable-unattended-upgrades: True + # the reason that this has to be done manually is because Landscape server needs an admin user to be + # created first (manual step, see above). Once the user and registration key is set configure the clients' url and ping-url options. 
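+      # For example (hostnames are placeholders matching the commented
+      # options below):
+      #   juju config landscape-client ping-url=http://landscape.example.com/ping \
+      #     url=https://landscape.example.com/message-system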
+ #ping-url: http://landscape.example.com/ping + #url: https://landscape.example.com/message-system + prometheus: + charm: cs:prometheus2 + series: bionic + bindings: + "": *oam-space + num_units: 1 + to: + - lxd:201 + prometheus-openstack-exporter: + charm: cs:prometheus-openstack-exporter + constraints: *oam-space-constr + bindings: + "": *public-space + identity-credentials: *internal-space + prometheus-openstack-exporter-service: *oam-space + num_units: 1 + to: + - lxd:201 + grafana: + charm: cs:~prometheus-charmers/grafana + series: bionic + bindings: + "": *oam-space + options: + port: "3000" + install_method: snap + num_units: 1 + to: + - lxd:201 + telegraf: + charm: cs:telegraf + options: + # Contrail services are listening on 8094 + socket_listener_port: '8095' + install_sources: | + - 'deb http://ppa.launchpad.net/telegraf-devs/ppa/ubuntu bionic main' + install_keys: |- + - | + -----BEGIN PGP PUBLIC KEY BLOCK----- + Version: SKS 1.1.6 + Comment: Hostname: keyserver.ubuntu.com + mQINBFcVSuIBEAC80aj0tAQ6+NhGV/bkSwu6Oj+BpDR50Be3uBv7ttdtvChL5zHTnaxjdK3h + LKSyrDLlmSOkffQ2uO7CxvqeF09MsHhyvrDDx0EY54//xxoAB++PoB2OQqmqldg3Al5Hp4Dz + rllV5CIX5PD8NGX8UpO3HXk5wEwn9G81l8cia3vPveU82EIkHMiJGpk6+L86OMlwXzxkSI3M + xXgNFKQc+ELDYLvGSseYC9vPN3kdmFoo/UjznPPE4fxr4bXit3N8Abl1jYjBa0x6SWkK1BAb + s8w3BXtvyk90z9Oyme69wPD4zAYfFp+kN2nDmTDBMtNCyMu9oatdI5SukMNK4Lcm8eAE6VNs + 04j7BKvGk9+17M8WP9Pw8nIisOwScS9gUlJlLUpnBaJ+sxoOvGQ4mzZxYMKzJh0E58aEX3bS + AyzQfsae8bZLNOTcgotyzzIDJFF9npzu3wmKjeOt/706p4LiDqKUbQK6cI+QcJ/y80ZUK8pB + M043ttSHWLmTBFX2drp6zQGae9+02fX89ZD+5c+MPlubJMYCCKkvQT4OssHfC+dVDQ66rwUy + OObrzsVgikdpIxQVitL3J+Dms56xAkdFfoo+qdxxdv9S/eakc5mfavc/4WVvmFDaJiqJnJRR + Ryw1zApRtuweEEdVn8niy1mahoKpWaw1pTI4AazjWI6xJH1JyQARAQABtB9MYXVuY2hwYWQg + UFBBIGZvciBUZWxlZ3JhZiBEZXZziQI4BBMBAgAiBQJXFUriAhsDBgsJCAcDAgYVCAIJCgsE + FgIDAQIeAQIXgAAKCRDxDL4ByUQG9UgbEACa4IzdeYxH/S5I6MrZfvWNo/JTZ/MZWDD+QlMW + 60ThAemCUSE+NJvZZ1q7ovGFpYnHJT9GQXOwJAX1quDUqyM1uXNmLlOyIVNnmjUTINoLhw2V + iC8E7dMWC9w4Na2fKezmNHH00kNl43ncstIjjZ3pLnDGYm1y0ItiCUcTRgHhx2cUZ/vStz1S + Pdqj4P3i8vuspoYJ2T3VPlM/0G+u9Yjuy3Uzu9RugOyO3UJPoi3+4O2VTNosSBy5MILVCp49 + eigyFVGpq5sT/c86qd1zqmsNWEubrlzDfETS4LMj9epr46ZKPXGQkeryt1m2Oe0HkIdNZ+IQ + 5p+i9fnEy7/1uKTXWQYsg2UWsLA2PvTvwY8JxxMhUFgv12q2w7STntqJyi9PLItYNtbtKoS3 + XZCCMqQLCWMXHY+2ol6rRSfs06H/wzlR8LjDaEXkDVuDmqMtcbgTboZYblsGxst7I/Y4Wgfi + J52uiIyobQ69uJbG0XeRTLZ3WyrBkopEsTX/+sQjVqbADXYU4hBVDgnCf2uN/5dcwSEvDj8/ + +WsToAfEJkscRBsQjTLVzf+eFqHLrbqz/yoYIqBc//IJMBSbxIf5mrOHHLdbOuMCB6PVwpTI + vLFOSDNPuVDX+S1goA8KJTnXpm8jWDynn3XaXx3AlYw4iZ0ETSgQLQLRd6JuPOEGXsGdBA== + =ufaX + -----END PGP PUBLIC KEY BLOCK----- + extra_plugins: | + [[inputs.exec]] + commands = [ "/usr/bin/awk '{print int($1)}' /proc/uptime" ] + name_override = "exec_uptime" + data_format = "value" + bindings: + # overrides private-address exposed to prometheus + prometheus-client: *oam-space + telegraf-prometheus: + charm: cs:telegraf + bindings: + # overrides private-address exposed to prometheus + prometheus-client: *oam-space + options: + install_sources: | + - 'deb http://ppa.launchpad.net/telegraf-devs/ppa/ubuntu bionic main' + install_keys: |- + - | + -----BEGIN PGP PUBLIC KEY BLOCK----- + Version: SKS 1.1.6 + Comment: Hostname: keyserver.ubuntu.com + mQINBFcVSuIBEAC80aj0tAQ6+NhGV/bkSwu6Oj+BpDR50Be3uBv7ttdtvChL5zHTnaxjdK3h + LKSyrDLlmSOkffQ2uO7CxvqeF09MsHhyvrDDx0EY54//xxoAB++PoB2OQqmqldg3Al5Hp4Dz + rllV5CIX5PD8NGX8UpO3HXk5wEwn9G81l8cia3vPveU82EIkHMiJGpk6+L86OMlwXzxkSI3M + xXgNFKQc+ELDYLvGSseYC9vPN3kdmFoo/UjznPPE4fxr4bXit3N8Abl1jYjBa0x6SWkK1BAb + 
s8w3BXtvyk90z9Oyme69wPD4zAYfFp+kN2nDmTDBMtNCyMu9oatdI5SukMNK4Lcm8eAE6VNs + 04j7BKvGk9+17M8WP9Pw8nIisOwScS9gUlJlLUpnBaJ+sxoOvGQ4mzZxYMKzJh0E58aEX3bS + AyzQfsae8bZLNOTcgotyzzIDJFF9npzu3wmKjeOt/706p4LiDqKUbQK6cI+QcJ/y80ZUK8pB + M043ttSHWLmTBFX2drp6zQGae9+02fX89ZD+5c+MPlubJMYCCKkvQT4OssHfC+dVDQ66rwUy + OObrzsVgikdpIxQVitL3J+Dms56xAkdFfoo+qdxxdv9S/eakc5mfavc/4WVvmFDaJiqJnJRR + Ryw1zApRtuweEEdVn8niy1mahoKpWaw1pTI4AazjWI6xJH1JyQARAQABtB9MYXVuY2hwYWQg + UFBBIGZvciBUZWxlZ3JhZiBEZXZziQI4BBMBAgAiBQJXFUriAhsDBgsJCAcDAgYVCAIJCgsE + FgIDAQIeAQIXgAAKCRDxDL4ByUQG9UgbEACa4IzdeYxH/S5I6MrZfvWNo/JTZ/MZWDD+QlMW + 60ThAemCUSE+NJvZZ1q7ovGFpYnHJT9GQXOwJAX1quDUqyM1uXNmLlOyIVNnmjUTINoLhw2V + iC8E7dMWC9w4Na2fKezmNHH00kNl43ncstIjjZ3pLnDGYm1y0ItiCUcTRgHhx2cUZ/vStz1S + Pdqj4P3i8vuspoYJ2T3VPlM/0G+u9Yjuy3Uzu9RugOyO3UJPoi3+4O2VTNosSBy5MILVCp49 + eigyFVGpq5sT/c86qd1zqmsNWEubrlzDfETS4LMj9epr46ZKPXGQkeryt1m2Oe0HkIdNZ+IQ + 5p+i9fnEy7/1uKTXWQYsg2UWsLA2PvTvwY8JxxMhUFgv12q2w7STntqJyi9PLItYNtbtKoS3 + XZCCMqQLCWMXHY+2ol6rRSfs06H/wzlR8LjDaEXkDVuDmqMtcbgTboZYblsGxst7I/Y4Wgfi + J52uiIyobQ69uJbG0XeRTLZ3WyrBkopEsTX/+sQjVqbADXYU4hBVDgnCf2uN/5dcwSEvDj8/ + +WsToAfEJkscRBsQjTLVzf+eFqHLrbqz/yoYIqBc//IJMBSbxIf5mrOHHLdbOuMCB6PVwpTI + vLFOSDNPuVDX+S1goA8KJTnXpm8jWDynn3XaXx3AlYw4iZ0ETSgQLQLRd6JuPOEGXsGdBA== + =ufaX + -----END PGP PUBLIC KEY BLOCK----- +# canonical-livepatch: +# charm: cs:canonical-livepatch +# options: +# livepatch_key: include-file://../secrets/livepatch-key.txt +# livepatch_proxy: *snap-proxy +# #livepatch_proxy: 'http://10.2.65.7:8080' +# thruk-agent: +# charm: cs:thruk-agent +# series: bionic +# options: +# source: 'deb http://ppa.launchpad.net/canonical-bootstack/thruk/ubuntu bionic main' +# key: | +# -----BEGIN PGP PUBLIC KEY BLOCK----- +# Version: SKS 1.1.6 +# Comment: Hostname: keyserver.ubuntu.com +# mQINBFQSRaQBEADDAtFnmi0w6ddIoR5olNu2778ACItGLtLPmlKTHJUjbs26nLZQcp5OY2DR +# cE03k55eXy7mn1aSxQaIqbC6lSPzpy+d1RTXMJmIJcEuyJKmJ2XfS9TgdhS3hrYmmNuFnBqp +# xc8FAqDnD/BnlF1suhgLf0mxiEZaTev5/ps3f/Ma8RK5ev5rM3ou/8iLewXlXBH83lf2OnzV +# BuYeAc/ikAnSg7dxyI26RMqdPi60NC67AVYqEddg8XoJ7zppUkvH4F+SlbgeadwEj6tjsOO3 +# S/CII9AuSyUbkxm10HHKh3WiKgd8sUWmOvMwTow7NkThlydzDiyIS+WBCfoMWdogqTER+7wX +# tfpR4Bo84ZJAx9ksi8YBidx1gCn6jgebkB4xeel7BTwoIAZL+ShWyYRCSo++DQneE4LkhPr8 +# 8V2+/VQbkXWIcyNagEA8mTJtkXgk3Pjalumt2TyR95/pxodN1+bVd2scoT4OMlAtKTZISwXs +# evYNo8Z6/ymFuSmtqYGGKA11vpao/OJfF4dvVkdArQ1gxgxhFnZyekZlwD81uC6hT/aTqiz6 +# 9nSYvVZsnQJcPE2hjEZ+Fk3x/A3NGGQrorICRFuoLzjTFAoeOnhdTaMIQzwXg4bdYCNv4j8P +# PJkvF8EPi1lgcOZ1k1Ng4DRSu1EkHGs3i50h4nyVScKEaaDtRwARAQABtCVMYXVuY2hwYWQg +# UFBBIGZvciBDYW5vbmljYWwgQm9vdHN0YWNriQI4BBMBAgAiBQJUEkWkAhsDBgsJCAcDAgYV +# CAIJCgsEFgIDAQIeAQIXgAAKCRBLmoF0eiB1QuDOD/wLwZrtJOSm1W7Gkm5Qj5djkXi7b8mc +# M4vS2fbxdZJjE+KRqxOHGdK68CT8RyUCfl13+RLyA45UxsNSoGmdnTcc7LUJbihxy92WgzF5 +# saJ1ObTMge/avS8kJ7l1B0xS3hue5GXfyVYcYlXV6gD53Kfu03z619PE2rmukm1YtyRWPQho +# okr4kNIJbAqG8LR0GnF0CKt9oq5bIs06LvBm2cbFa9txeDOZcLMKfgMOda3Ju7U6k56MYl4a +# sUUP8oXehcvbLx9nsOT4A4XHLj+yOTuXGsTXvn+M6NXODuHj3cN0OvVN+o6/6kjyVuWJqONr +# IdJ5knIWx6UKoWXzRdcqbsSyDpyuUjOFAPyQdQ3zs1DL9vJbOUasQOilR+YVX+ULN9Q17GkK +# IwZc68b9bDZQRtJi2bOhorWamHKZuEKw95lCEHOms/C4Lw04y7sPnXV0MZejXfn/X4N5BELb +# ItSPhoe2IBrh9p1W7CMvfkvjO62nM6oqh6vdKmgW4Im/PG+7DYpLAIHY+C0WsbI2BKDTHNYu +# VKBmUjgMwfz+peks7pJBUgT74XR954vnOvMn0IiSV/+aoHANzeA9dxkt5W5YW1gBK9sw3eTQ +# 9jcRJzswkuHqgE+HFqGFzIgBYB+769+vUdbVEIDKPQXJB94VoLv2oFe1eOQhIbuBTZtDe2x6 +# DCE3Nw== +# =Kaig +# -----END PGP PUBLIC KEY BLOCK----- + prometheus-ceph-exporter: + charm: cs:prometheus-ceph-exporter + series: bionic + bindings: + "": 
*oam-space + ceph: *ceph-public-space + num_units: 1 + to: + - lxd:200 +# external-policy-routing: +# charm: cs:~canonical-bootstack/policy-routing +# options: +# cidr: *external-network-cidr +# gateway: *external-network-gateway + + ceilometer: + charm: cs:ceilometer + num_units: 3 + bindings: + "": *oam-space + public: *public-space + admin: *admin-space + internal: *internal-space + options: + openstack-origin: *openstack-origin + region: *openstack-region + use-internal-endpoints: True + to: + - lxd:200 + - lxd:201 + - lxd:202 + ceilometer-agent: + charm: cs:ceilometer-agent + options: + use-internal-endpoints: True + + # Just to deploy the ubuntu charm for the contrail servers + juniper-server: + charm: cs:ubuntu + num_units: 3 + to: + - 400 + - 401 + - 402 + +# vault stuff + etcd: + charm: cs:etcd + num_units: 3 + bindings: + "": *oam-space + cluster: *internal-space + db: *internal-space + options: + channel: 3.2/stable + to: + - lxd:400 + - lxd:401 + - lxd:402 + easyrsa: + charm: cs:~containers/easyrsa + num_units: 1 + bindings: + "": *oam-space + to: + - lxd:402 + vault: + charm: cs:vault + num_units: 3 + bindings: + "": *oam-space + access: *internal-space + secrets: *internal-space + certificates: *internal-space + ha: *internal-space + etcd: *internal-space + cluster: *internal-space + options: + vip: *vault-vip + nagios_context: *nagios-context + to: + - lxd:400 + - lxd:401 + - lxd:402 + +relations: + # openstack + - [ "mysql:ha", "hacluster-mysql:ha" ] + + - [ "keystone:ha", "hacluster-keystone:ha" ] + - [ "keystone:shared-db", "mysql:shared-db" ] + + - [ "cinder:shared-db", "mysql:shared-db" ] + - [ "cinder:identity-service", "keystone:identity-service" ] + - [ "cinder:amqp", "rabbitmq-server:amqp" ] + - [ "cinder:ha", "hacluster-cinder:ha" ] + + - [ "cinder-ceph:ceph", "ceph-mon:client" ] + - [ "cinder-ceph:storage-backend", "cinder:storage-backend" ] + +# - [ "cinder2:shared-db", "mysql:shared-db" ] +# - [ "cinder2:identity-service", "keystone:identity-service" ] +# - [ "cinder2:amqp", "rabbitmq-server:amqp" ] +# - [ "cinder2:ha", "hacluster-cinder2:ha" ] + +# commented out along with cinder2 above; leaving it active breaks the deploy +# - [ "cinder-ceph:storage-backend", "cinder2:storage-backend" ] + +# - [ "cinder2", "cinder-backup" ] + + - [ "ceph-osd:mon", "ceph-mon:osd" ] +# - [ "ceph-osd2:mon", "ceph-mon:osd" ] + + - [ "glance:ha", "hacluster-glance:ha" ] + - [ "glance:shared-db", "mysql:shared-db" ] + - [ "glance:identity-service", "keystone:identity-service" ] + - [ "glance:ceph", "ceph-mon:client" ] + - [ "glance:amqp", "rabbitmq-server:amqp" ] + - [ "glance:image-service", "cinder:image-service" ] + + - [ "heat:ha", "hacluster-heat:ha" ] + - [ "heat:shared-db", "mysql:shared-db" ] + - [ "heat:identity-service", "keystone:identity-service" ] + - [ "heat:amqp", "rabbitmq-server:amqp" ] + + - [ "neutron-api:ha", "hacluster-neutron:ha" ] + - [ "neutron-api:shared-db", "mysql:shared-db" ] + - [ "neutron-api:amqp", "rabbitmq-server:amqp" ] + - [ "neutron-api:neutron-api", "nova-cloud-controller:neutron-api" ] + - [ "neutron-api:identity-service", "keystone:identity-service" ] + + - [ "nova-cloud-controller:ha", "hacluster-nova:ha" ] + - [ "nova-cloud-controller:shared-db", "mysql:shared-db" ] + - [ "nova-cloud-controller:amqp", "rabbitmq-server:amqp" ] + - [ "nova-cloud-controller:identity-service", "keystone:identity-service" ] + - [ "nova-cloud-controller:image-service", "glance:image-service" ] + - [ "nova-cloud-controller:memcache", "memcached:cache" ] + + - [ "nova-compute:juju-info", "ntp:juju-info" ] + - [ "nova-compute:amqp", "rabbitmq-server:amqp" 
] + - [ "nova-compute:ceph", "ceph-mon:client" ] + - [ "nova-compute:ceph-access", "cinder-ceph:ceph-access" ] + - [ "nova-compute:image-service", "glance:image-service" ] + - [ "nova-compute:cloud-compute", "nova-cloud-controller:cloud-compute" ] + + - [ "openstack-dashboard:ha", "hacluster-horizon:ha" ] + - [ "openstack-dashboard:identity-service", "keystone:identity-service" ] + - [ "openstack-dashboard:shared-db", "mysql:shared-db" ] + + # ceilometer + - [ "ceilometer:identity-credentials", "keystone:identity-credentials" ] + - [ "ceilometer:amqp", "rabbitmq-server:amqp" ] + + - [ "ceilometer-agent:ceilometer-service", "ceilometer:ceilometer-service" ] + - [ "ceilometer-agent:nova-ceilometer", "nova-compute:nova-ceilometer" ] + - [ "ceilometer-agent:amqp", "rabbitmq-server:amqp"] + + # gnocchi + - [ "gnocchi:ha", "hacluster-gnocchi:ha" ] + - [ "gnocchi:shared-db", "mysql:shared-db" ] + - [ "gnocchi:amqp", "rabbitmq-server:amqp" ] + - [ "gnocchi:identity-service", "keystone:identity-service" ] + - [ "gnocchi:storage-ceph", "ceph-mon:client" ] + - [ "gnocchi:coordinator-memcached", "memcached:cache" ] + - [ "gnocchi:metric-service", "ceilometer:metric-service" ] + + # aodh + - [ "aodh:shared-db", "mysql:shared-db" ] + - [ "aodh:identity-service", "keystone:identity-service" ] + - [ "aodh:amqp", "rabbitmq-server:amqp" ] + - [ "aodh:ha", "hacluster-aodh:ha" ] + + # sysconfig relations + #- [ "ceph-osd:juju-info", "sysconfig-storage:juju-info" ] + - [ "nova-compute:juju-info", "sysconfig-compute:juju-info" ] + - [ "neutron-gateway:juju-info", "sysconfig-control:juju-info" ] + + # Neutron-gateway relations + - [ "neutron-gateway", "nova-cloud-controller" ] + - [ "neutron-gateway:amqp", "rabbitmq-server:amqp" ] + - [ "neutron-gateway:neutron-plugin-api", "neutron-api:neutron-plugin-api" ] + - [ "neutron-gateway:juju-info", "ntp:juju-info" ] + + # Neutron-openvswitch relations + - [ "neutron-openvswitch:amqp" , "rabbitmq-server:amqp" ] + - [ "neutron-openvswitch" , "neutron-api" ] + - [ "neutron-openvswitch" , "nova-compute" ] + + # vault stuff + - [ "vault:shared-db", "mysql:shared-db" ] + - [ "vault:ha", "hacluster-vault:ha" ] + + - [ "ceph-osd:secrets-storage", "vault:secrets"] +# - [ "ceph-osd2:secrets-storage", "vault:secrets"] + - [ "etcd:certificates", "easyrsa:client" ] + - [ "etcd:db", "vault:etcd" ] + + # vault lma/monitoring + - [ "telegraf:juju-info", "vault:juju-info" ] + - [ "filebeat:beats-host", "vault:juju-info" ] + - [ "nrpe-container:nrpe-external-master", "vault:nrpe-external-master" ] + - [ "landscape-client:container", "vault:juju-info" ] + + - [ "telegraf:juju-info", "etcd:juju-info" ] + - [ "filebeat:beats-host", "etcd:juju-info" ] + - [ "nrpe-container:nrpe-external-master", "etcd:nrpe-external-master" ] + - [ "landscape-client:container", "etcd:juju-info" ] + + - [ "telegraf:juju-info", "easyrsa:juju-info" ] + - [ "filebeat:beats-host", "easyrsa:juju-info" ] + - [ "nrpe-container:general-info", "easyrsa:juju-info" ] + - [ "landscape-client:container", "easyrsa:juju-info" ] + + # memcached + - [ "memcached:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "memcached:juju-info", "telegraf:juju-info" ] + - [ "memcached:juju-info", "filebeat:beats-host" ] + - [ "memcached:juju-info", "landscape-client:container" ] + + # grafana + - [ "grafana:juju-info", "filebeat:beats-host" ] + - [ "grafana:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "grafana:juju-info", "telegraf:juju-info" ] + - [ "grafana:juju-info", 
"landscape-client:container" ] + + # graylog + - [ "graylog:elasticsearch", "elasticsearch:client" ] + - [ "graylog:mongodb", "graylog-mongodb:database" ] + - [ "graylog:beats", "filebeat:logstash" ] + - [ "graylog:nrpe-external-master", "nrpe-host:nrpe-external-master" ] + - [ "graylog:juju-info", "telegraf:juju-info" ] + - [ "graylog:juju-info", "landscape-client:container" ] + + # nagios + - [ "nagios:juju-info", "filebeat:beats-host" ] + - [ "nagios:juju-info", "telegraf:juju-info" ] + - [ "nagios:monitors", "nrpe-container:monitors" ] + - [ "nagios:monitors", "nrpe-host:monitors" ] + - [ "nagios:juju-info", "landscape-client:container" ] + + # openstack-service-checks + - [ "openstack-service-checks:identity-credentials", "keystone:identity-credentials" ] + - [ "openstack-service-checks:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "openstack-service-checks:juju-info", "telegraf:juju-info" ] + - [ "openstack-service-checks:juju-info", "filebeat:beats-host" ] + - [ "openstack-service-checks:juju-info", "landscape-client:container" ] + + # graylog-mongodb + - [ "graylog-mongodb:juju-info", "telegraf:juju-info" ] + - [ "graylog-mongodb:juju-info", "filebeat:beats-host" ] + - [ "graylog-mongodb:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "graylog-mongodb:juju-info", "landscape-client:container" ] + + # elasticsearch + - [ "elasticsearch:juju-info", "filebeat:beats-host" ] + - [ "elasticsearch:juju-info", "telegraf:juju-info" ] + - [ "elasticsearch:nrpe-external-master", "nrpe-host:nrpe-external-master" ] + - [ "elasticsearch:juju-info", "landscape-client:container" ] + + # prometheus + - [ "prometheus:juju-info", "filebeat:beats-host" ] + - [ "prometheus:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "prometheus:juju-info", "telegraf-prometheus:juju-info" ] + - [ "prometheus:grafana-source", "grafana:grafana-source" ] + - [ "prometheus:target", "telegraf:prometheus-client" ] + - [ "prometheus:juju-info", "landscape-client:container" ] + + # prometheus-openstack-exporter + - [ "prometheus-openstack-exporter:identity-credentials", "keystone:identity-credentials" ] + - [ "prometheus-openstack-exporter:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "prometheus-openstack-exporter:prometheus-openstack-exporter-service", "prometheus:target" ] + - [ "prometheus-openstack-exporter:juju-info", "filebeat:beats-host" ] + - [ "prometheus-openstack-exporter:juju-info", "telegraf:juju-info" ] + - [ "prometheus-openstack-exporter:juju-info", "landscape-client:container" ] + + # prometheus-ceph-exporter + - [ "prometheus-ceph-exporter:ceph", "ceph-mon:client" ] + - [ "prometheus-ceph-exporter:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "prometheus-ceph-exporter:ceph-exporter", "prometheus:target" ] + - [ "prometheus-ceph-exporter:juju-info", "filebeat:beats-host" ] + - [ "prometheus-ceph-exporter:juju-info", "telegraf:juju-info" ] + - [ "prometheus-ceph-exporter:juju-info", "landscape-client:container" ] + + # juniper server + - [ "juniper-server:juju-info", "ntp:juju-info" ] + + # LMA/landscape subordinates + - [ "nova-compute", "filebeat" ] + - [ "nova-compute", "telegraf" ] + - [ "nova-compute", "nrpe-host" ] + - [ "nova-compute", "landscape-client" ] + + - [ "neutron-gateway", "filebeat" ] + - [ "neutron-gateway", "telegraf" ] + - [ "neutron-gateway", "nrpe-host" ] + - [ "neutron-gateway", "landscape-client" ] + + - [ "keystone", "filebeat" ] + - [ "keystone", "telegraf" ] + - [ 
"keystone", "nrpe-container" ] + - [ "keystone", "landscape-client" ] + + - [ "glance", "filebeat" ] + - [ "glance", "telegraf" ] + - [ "glance", "nrpe-container" ] + - [ "glance", "landscape-client" ] + + - [ "cinder", "filebeat" ] + - [ "cinder", "telegraf" ] + - [ "cinder", "nrpe-container" ] + - [ "cinder", "landscape-client" ] + +# - [ "cinder2", "filebeat" ] +# - [ "cinder2", "telegraf" ] +# - [ "cinder2", "nrpe-container" ] +# - [ "cinder2", "landscape-client" ] + + - [ "heat", "filebeat" ] + - [ "heat", "telegraf" ] + - [ "heat", "nrpe-container" ] + - [ "heat", "landscape-client" ] + + - [ "mysql", "filebeat" ] + - [ "mysql", "telegraf" ] + - [ "mysql", "nrpe-container" ] + - [ "mysql", "landscape-client" ] + + - [ "ceph-mon", "filebeat" ] + - [ "ceph-mon", "telegraf" ] + - [ "ceph-mon", "nrpe-container" ] + - [ "ceph-mon", "landscape-client" ] + + - [ "neutron-api", "filebeat" ] + - [ "neutron-api", "telegraf" ] + - [ "neutron-api", "nrpe-container" ] + - [ "neutron-api", "landscape-client" ] + + - [ "rabbitmq-server", "filebeat" ] + - [ "rabbitmq-server", "telegraf" ] + - [ "rabbitmq-server", "nrpe-container" ] + - [ "rabbitmq-server", "landscape-client" ] + + - [ "openstack-dashboard", "filebeat" ] + - [ "openstack-dashboard", "telegraf" ] + - [ "openstack-dashboard", "nrpe-container" ] + - [ "openstack-dashboard", "landscape-client" ] + + - [ "nova-cloud-controller", "filebeat" ] + - [ "nova-cloud-controller", "telegraf" ] + - [ "nova-cloud-controller", "nrpe-container" ] + - [ "nova-cloud-controller", "landscape-client" ] + + - [ "gnocchi", "filebeat" ] + - [ "gnocchi", "telegraf" ] + - [ "gnocchi", "nrpe-container" ] + - [ "gnocchi", "landscape-client" ] + + - [ "ceilometer", "filebeat" ] + - [ "ceilometer", "telegraf" ] + - [ "ceilometer", "nrpe-container" ] + - [ "ceilometer", "landscape-client" ] + + - [ "aodh", "filebeat" ] + - [ "aodh", "telegraf" ] + - [ "aodh", "landscape-client" ] + - [ "aodh", "nrpe-container" ] + + - [ "juniper-server", "telegraf" ] + - [ "juniper-server", "filebeat" ] + - [ "juniper-server", "landscape-client" ] + - [ "juniper-server", "nrpe-host" ] + + - [ "hacluster-aodh:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "hacluster-cinder:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "hacluster-glance:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "hacluster-gnocchi:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "hacluster-heat:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "hacluster-horizon:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "hacluster-keystone:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "hacluster-mysql:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "hacluster-neutron:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "hacluster-nova:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + + # Landscape + - [ "landscape-server:juju-info", "ntp:juju-info" ] + - [ "landscape-server:juju-info", "filebeat:beats-host" ] + - [ "landscape-server:juju-info", "nrpe-host:general-info" ] + - [ "landscape-server:juju-info", "telegraf:juju-info" ] + - [ "landscape-server:juju-info", "landscape-client:container" ] + + - [ "landscape-rabbitmq-server:juju-info", "ntp:juju-info" ] + - [ "landscape-rabbitmq-server:juju-info", "filebeat:beats-host" ] + - [ "landscape-rabbitmq-server:nrpe-external-master", "nrpe-host:nrpe-external-master" ] + - [ 
"landscape-rabbitmq-server:juju-info", "telegraf:juju-info" ] + - [ "landscape-rabbitmq-server:juju-info", "landscape-client:container" ] + + - [ "landscape-postgresql:juju-info", "ntp:juju-info" ] + - [ "landscape-postgresql:juju-info", "filebeat:beats-host" ] + - [ "landscape-postgresql:local-monitors", "nrpe-host:local-monitors" ] + - [ "landscape-postgresql:juju-info", "nrpe-host:general-info" ] + - [ "landscape-postgresql:juju-info", "telegraf:juju-info" ] + - [ "landscape-postgresql:juju-info", "landscape-client:container" ] + + - [ "landscape-haproxy:juju-info", "filebeat:beats-host" ] + - [ "landscape-haproxy:juju-info", "nrpe-host:general-info" ] + - [ "landscape-haproxy:local-monitors", "nrpe-host:local-monitors" ] + - [ "landscape-haproxy:juju-info", "telegraf:juju-info" ] + - [ "landscape-haproxy:juju-info", "landscape-client:container" ] + + - [ "landscape-server:amqp", "landscape-rabbitmq-server:amqp" ] + - [ "landscape-server:website", "landscape-haproxy:reverseproxy" ] + - [ "landscape-server:db", "landscape-postgresql:db-admin" ] + + + diff --git a/config/canonical-openstack-rules.yaml b/config/canonical-openstack-rules.yaml new file mode 100644 index 0000000..58af4af --- /dev/null +++ b/config/canonical-openstack-rules.yaml @@ -0,0 +1,113 @@ +subordinates: + telegraf: + where: all except prometheus # and prometheus-ceph-exporter and prometheus-openstack-exporter + host-suffixes: [prometheus] + landscape-client: + where: all except landscape-server + filebeat: + where: all except graylog + canonical-livepatch: + where: host only + nrpe: + where: container aware # and except nagios + host-suffixes: [host, physical, guest] + container-suffixes: [lxd, container] + ntp: + where: host only # You don't want NTP in a container duelling with ntp in the host + thruk-agent: + where: on nagios + +operations mandatory: &operations-mandatory-charms + - elasticsearch + - grafana + - graylog + - landscape-server + - nagios + - openstack-service-checks + - prometheus2 + - prometheus-openstack-exporter + - prometheus-ceph-exporter + +operations mandatory dependencies: &operations-mandatory-deps + - postgresql + +operations subordinates: &operations-mandatory-subs + - canonical-livepatch + - filebeat + - ksplice + - landscape-client + - lldpd + - nrpe + - ntp + - telegraf + - thruk-agent + +operations charms: &operations-charms + - *operations-mandatory-charms + - *operations-mandatory-deps + - *operations-mandatory-subs + +openstack mandatory: &openstack-mandatory-charms + - ceilometer + - ceilometer-agent + - ceph-mon + - ceph-osd + - cinder + - cinder-ceph + - glance + - heat + - keystone + - neutron-api + - nova-cloud-controller + - nova-compute + - openstack-dashboard + +openstack mandatory deps: &openstack-mandatory-deps + - haproxy + - memcached + - percona-cluster + - rabbitmq-server + +openstack mandatory subordinates: &openstack-mandatory-subs + - hacluster + +openstack optional charms: &openstack-optional-charms + - aodh + - ceph-radosgw + - designate + - designate-bind + - glance-simplestreams-sync + - glance-sync-slave + - gnocchi + - keystone-ldap + - mongodb # Optional since Gnochii + - neutron-gateway + - swift-proxy + - swift-storage + - cinder-backup + - vault + - etcd + - easyrsa + - neutron-openvswitch + +cisco-aci-charms: &cisco-aci-charms + - neutron-api-plugin-aci + - openstack-dashboard-plugin-gbp + +helper-charms: &helper-charms + - sysconfig + - bcache-tuning + - policy-routing + +openstack charms: &openstack-charms + - *openstack-mandatory-charms + - 
+# External VIPs [external network - vlan191]
+- fqdn: aodh.example.com
+  ip_addresses: 10.0.1.211
+- fqdn: cinder.example.com
+  ip_addresses: 10.0.1.212
+- fqdn: dashboard.example.com
+  ip_addresses: 10.0.1.213
+- fqdn: glance.example.com
+  ip_addresses: 10.0.1.214
+- fqdn: heat.example.com
+  ip_addresses: 10.0.1.215
+- fqdn: keystone.example.com
+  ip_addresses: 10.0.1.216
+- fqdn: mysql.example.com
+  ip_addresses: 10.0.1.217
+- fqdn: neutron.example.com
+  ip_addresses: 10.0.1.218
+- fqdn: nova.example.com
+  ip_addresses: 10.0.1.219
+- fqdn: gnocchi.example.com
+  ip_addresses: 10.0.1.220
+- fqdn: vault.example.com
+  ip_addresses: 10.0.1.221
diff --git a/config/hosts.yaml b/config/hosts.yaml
new file mode 100644
index 0000000..32e69e4
--- /dev/null
+++ b/config/hosts.yaml
@@ -0,0 +1,39 @@
+- hostname: asrock01
+  ip: 10.0.1.241
+  vm_zone: asrock01
+  pods:
+    - name: as1-juju-01
+      disk: 20G
+      cpu: 2
+      mem: 4G
+      nics:
+        - space: oam
+          ip: 10.0.1.231
+          mode: auto
+      tags: ['juju']
+- hostname: asrock02
+  ip: 10.0.1.242
+  vm_zone: asrock02
+  pods:
+    - name: as2-juju-01
+      disk: 20G
+      cpu: 2
+      mem: 4G
+      nics:
+        - space: oam
+          ip: 10.0.1.232
+          mode: auto
+      tags: ['juju']
+- hostname: asrock03
+  ip: 10.0.1.243
+  vm_zone: asrock03
+  pods:
+    - name: as3-juju-01
+      disk: 20G
+      cpu: 2
+      mem: 4G
+      nics:
+        - space: oam
+          ip: 10.0.1.233
+          mode: auto
+      tags: ['juju']
diff --git a/config/juju-model-default-cis.yaml b/config/juju-model-default-cis.yaml
new file mode 100644
index 0000000..114927b
--- /dev/null
+++ b/config/juju-model-default-cis.yaml
@@ -0,0 +1,72 @@
+cloudinit-userdata: |
+  packages:
+    - squashfuse
+    - libopenscap8
+  write_files:
+    - owner: root:root
+      path: /etc/apt/auth.conf.d/cis-harden.conf
+      permissions: '0600'
+      content: |
+        # Credentials to allow the connection to the CIS benchmarks private PPA
+        machine private-ppa.launchpad.net/ubuntu-advantage/security-benchmarks/ubuntu
+        login arif-ali
+        password kNnpLf27XvGsdwt6VxfT
+    - owner: root:root
+      path: /tmp/cis-hardening.conf
+      permissions: '0644'
+      content: |
+        # Hash created by grub-mkpasswd-pbkdf2 to set the grub password. If empty,
+        # the grub password is not set.
+        # (CIS rule 1.4.2)
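+        # For example (a sketch: grub-mkpasswd-pbkdf2 ships with grub-common
+        # and prompts for the password twice):
+        #   $ grub-mkpasswd-pbkdf2
+        #   PBKDF2 hash of your password is grub.pbkdf2.sha512.10000.<hash>
+        # Paste the full grub.pbkdf2... string into grub_hash below.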
+        grub_hash=
+        # Grub user set for authentication
+        grub_user=root
+
+        # Time synchronization service selected (ntp or chrony; if empty, none will be installed)
+        # (CIS rules 2.2.1.1-2.2.1.3)
+        time_sync_svc=
+        time_sync_addr=
+
+        # Audit log storage size before the log is automatically rotated
+        # (CIS rule 4.1.1.1)
+        max_log_file=8
+
+        # Remote log host address (CIS rule 4.2.2.4)
+        # Use the format loghost.example.com:554, to define the port
+        remote_log_server=
+
+        # SSH access limitation parameters at /etc/ssh/sshd_config (CIS rule 5.2.14)
+        AllowUsers=ubuntu
+        AllowGroups=
+        DenyUsers=
+        DenyGroups=
+
+        # PAM password quality parameters at /etc/security/pwquality.conf (CIS rule 5.3.1)
+        minlen=14
+        dcredit=-1
+        ucredit=-1
+        ocredit=-1
+        lcredit=-1
+
+        # sudo group members, aside from root (CIS rule 5.6)
+        sudo_member=
+
+        # Unowned files will be changed to this user (CIS rule 6.1.11)
+        unowned_user=root
+        # Ungrouped files will be changed to this group (CIS rule 6.1.12)
+        unowned_group=root
+
+        # Delete files in the home directory which violate CIS rules (CIS rules 6.2.11, 6.2.12, 6.2.14)
+        delete_user_files=true
+  preruncmd:
+    - locale-gen en_GB.UTF-8; update-locale
+    - sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys A166877412DAC26E73CEBF3FF6C280178D13028C
+    - sudo add-apt-repository "deb https://private-ppa.launchpad.net/ubuntu-advantage/security-benchmarks/ubuntu bionic main"
+    - sudo apt update
+    - sudo DEBIAN_FRONTEND=noninteractive apt install -y -q usg-cisbenchmark
+    - cd /usr/share/ubuntu-scap-security-guides/cis-hardening; sudo ./Canonical_Ubuntu_18.04_CIS-harden.sh -f /tmp/cis-hardening.conf lvl2_server
+    # remove auditd: the hardening script adds it, but it is not supported in containers
+    - "systemd-detect-virt --container && apt purge -y auditd"
+
+default-series: "bionic"
+apt-mirror: http://192.168.1.12/ubuntu
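+# Files like this are consumed through the model_defaults key in
+# config/master.yaml; a hand-run equivalent for the scalar keys (a sketch)
+# would be:
+#   juju model-config default-series=bionic apt-mirror=http://192.168.1.12/ubuntu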
diff --git a/config/juju-model-default.yaml b/config/juju-model-default.yaml
new file mode 100644
index 0000000..f00136a
--- /dev/null
+++ b/config/juju-model-default.yaml
@@ -0,0 +1,32 @@
+cloudinit-userdata: |
+  write_files:
+    - owner: root:root
+      path: /root/99-post-juju.yaml
+      permissions: '0644'
+      content: |
+        network:
+          version: 2
+          ethernets:
+            ens3:
+              link-local: []
+            ens4:
+              link-local: []
+            ens5:
+              link-local: []
+            ens6:
+              link-local: []
+            ens7:
+              link-local: []
+            ens8:
+              link-local: []
+            ens9:
+              link-local: []
+  preruncmd:
+    - locale-gen en_GB.UTF-8; update-locale
+    - "systemd-detect-virt --container && rm -rf /root/99-post-juju.yaml"
+    - "! systemd-detect-virt --container && mv /root/99-post-juju.yaml /etc/netplan/99-post-juju.yaml"
+    - "! systemd-detect-virt --container && sudo lxc profile set default security.nesting true"
+    - sudo netplan apply
+
+default-series: "bionic"
+apt-mirror: http://192.168.1.12/ubuntu
diff --git a/config/juju_deploy.sh b/config/juju_deploy.sh
new file mode 100755
index 0000000..7a8b426
--- /dev/null
+++ b/config/juju_deploy.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+juju deploy ./bundle.yaml \
+    --overlay ./overlays/ovs.yaml \
+    --overlay ./overlays/hostnames.yaml \
+    --overlay ./overlays/ldap.yaml \
+    --overlay ./overlays/resources.yaml \
+    --overlay ./overlays/openstack_versioned_overlay.yaml \
+    --overlay ./overlays/stsstack.yaml "$@"
+
+# --overlay ./overlays/contrail.yaml \
+# --overlay ./overlays/openstack_versioned_overlay.yaml \
+# --overlay ./overlays/openstack_versioned_overlay_gemini.yaml \
+# --overlay ./overlays/contrail_versioned_overlay.yaml \
+# --overlay ./overlays/ssl.yaml \
+# --overlay ./overlays/contrail.yaml \
+# --overlay ./overlays/contrail_versioned_overlay.yaml \
diff --git a/config/master.yaml b/config/master.yaml
new file mode 100644
index 0000000..12822b4
--- /dev/null
+++ b/config/master.yaml
@@ -0,0 +1,94 @@
+project:
+  customer: Arif Ali
+  project: Internal System
+  opportunity: blah
+  sku: fcb-stable-queens-bionic-bluestore
+  arch: disaggregated
+  hw: approved
+layers:
+- name: baremetal
+  type: baremetal
+  config:
+    hosts: include-rel://config/hosts.yaml
+    ssh_user: ubuntu
+- name: maas
+  type: maas
+  parent: baremetal
+  config:
+    tweaks:
+      - nobond
+      - nobridge
+    maas_vip: 192.168.1.22
+    postgresql_vip: 192.168.1.22
+    package_repositories: []
+    maas_config:
+      maas_name: maas
+      completed_intro: True
+      dnssec_validation: 'no'
+      upstream_dns:
+        - 192.168.1.13
+      ntp_servers:
+        - 0.uk.pool.ntp.org
+      kernel_opts:
+        - console=tty0
+        - console=ttyS0,115200n8
+    maas_boot_source_selections:
+      - release: 'focal'
+        arches: ['amd64']
+      - release: 'bionic'
+        arches: ['amd64']
+    maas_admin: admin
+    maas_admin_email: mail@arif-ali.co.uk
+    # The password will be auto-generated in `generated/maas/maas-pass`
+    maas_admin_password: openstack
+    infra_extra_repositories:
+      - ppa:maas/stable
+    networks: include-rel://config/networks.yaml
+    enlist_timeout: 1801
+    nodeconfig: include-rel://config/bucketsconfig.yaml
+    nodes: include-rel://config/nodes.yaml
+    domains:
+      - name: maas
+        #authoritative: false
+        is_default: true
+    dnsresources: include-rel://config/dnsresources.yaml
+- name: juju_maas_controller
+  type: juju_maas_controller
+  parent: maas
+  config:
+    ha: 3
+    ha_timeout: 900
+    controller_name: home-maas
+    model_defaults: juju-model-default.yaml
+- name: openstack
+  type: openstack
+  parent: juju_maas_controller
+  config:
+    juju_model: openstack
+    bundles: # Primary bundle followed by optional overlays.
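+    # Overlays are applied in the order listed; later files override earlier
+    # values on conflicting keys, so keep the site-specific tweaks
+    # (stsstack.yaml) last.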
+    - bundle.yaml
+    - overlays/hostnames.yaml
+    - overlays/ovs.yaml
+    - overlays/ldap.yaml
+    - overlays/openstack_versioned_overlay.yaml
+    - overlays/stsstack.yaml
+    ha_type: ssl
+    openstack_config: include-rel://config/openstack.yaml
+    validate:
+      - type: rados
+        config:
+          unit: ceph-mon/0
+      - type: fio
+        config:
+          unit: ceph-mon/0
+      - type: rally
+        config:
+          task_yaml: rally.yaml
+
+#- name: magpie
+#  type: magpie
+#  parent: juju_maas_controller
+#  config:
+#    juju_model: magpie
+#    oam-space: oam-space
+
diff --git a/config/networks.yaml b/config/networks.yaml
new file mode 100644
index 0000000..9094529
--- /dev/null
+++ b/config/networks.yaml
@@ -0,0 +1,131 @@
+fabrics:
+  default:
+    vlans:
+      300:
+        # OAM network
+        dhcp_on: true
+        mtu: 1500
+        space: oam
+        subnets:
+          oam:
+            cidr: 10.0.1.0/24
+            gateway_ip: 10.0.1.253
+            reserved:
+              # 10.2.70.1, .2,.3, reserved
+              # gap: 4-7, infra-head, reserved
+              # 8-10 reserved diff env
+              # 11-13 nodes static ips
+              # 14-16 infras
+              # 17-25: nodes storage static ips
+              # 26-126: future use, reserved
+              # 127-255: unused, future use, reserved
+              # 71.0: openstack
+              # .1 .2 maas
+              # .3-18: vips
+              # 19-59: dhcp
+              # 60-62: juju controllers
+              # 63-254: containers
+              dhcp:
+                start_ip: 10.0.1.1
+                end_ip: 10.0.1.9
+                type: dynamic
+              openstack-vips:
+                start_ip: 10.0.1.211
+                end_ip: 10.0.1.225
+              servers:
+                start_ip: 10.0.1.241
+                end_ip: 10.0.1.254
+      1:
+        # External is on the same bond as OAM -> MTU 1500
+        mtu: 1500
+        space: external
+        subnets:
+          external:
+            cidr: 192.168.1.0/24
+      301:
+        dhcp_on: true
+        mtu: 1500
+        space: ceph-access
+        subnets:
+          ceph_access:
+            cidr: 10.0.2.0/24
+            reserved:
+              maas:
+                start_ip: 10.0.2.1
+                end_ip: 10.0.2.49
+                type: dynamic
+              servers:
+                start_ip: 10.0.2.241
+                end_ip: 10.0.2.254
+      302:
+        dhcp_on: true
+        mtu: 1500
+        space: ceph-replica
+        subnets:
+          ceph_replication:
+            cidr: 10.0.3.0/24
+            reserved:
+              maas:
+                start_ip: 10.0.3.1
+                end_ip: 10.0.3.49
+                type: dynamic
+              servers:
+                start_ip: 10.0.3.241
+                end_ip: 10.0.3.254
+      303:
+        dhcp_on: true
+        mtu: 1500
+        space: overlay
+        subnets:
+          overlay:
+            cidr: 10.0.4.0/24
+            reserved:
+              maas:
+                start_ip: 10.0.4.1
+                end_ip: 10.0.4.49
+                type: dynamic
+              servers:
+                start_ip: 10.0.4.241
+                end_ip: 10.0.4.254
+      304:
+        dhcp_on: true
+        mtu: 1500
+        space: internal
+        subnets:
+          internal:
+            cidr: 10.0.5.0/24
+            reserved:
+              maas:
+                start_ip: 10.0.5.1
+                end_ip: 10.0.5.49
+                type: dynamic
+              servers:
+                start_ip: 10.0.5.241
+                end_ip: 10.0.5.254
+      305:
+        dhcp_on: true
+        mtu: 1500
+        space: admin
+        subnets:
+          admin:
+            cidr: 10.0.6.0/24
+            reserved:
+              maas:
+                start_ip: 10.0.6.1
+                end_ip: 10.0.6.49
+                type: dynamic
+              servers:
+                start_ip: 10.0.6.241
+                end_ip: 10.0.6.254
+
+spaces:
+  ceph-access:
+    description: Ceph access.
+  ceph-replica:
+    description: Ceph replication.
+  external:
+    description: Floating IP network.
+  oam:
+    description: Operations, administration and management; PXE and provisioning.
+  overlay:
+    description: OVS overlay space for tenant networks
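+# A quick way to eyeball what MAAS built from this file (a sketch; assumes a
+# CLI profile named "admin" and jq installed; the exact JSON fields may vary
+# by MAAS version):
+#   maas admin subnets read | jq '.[] | {cidr, vid: .vlan.vid, space}'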
diff --git a/config/nodes.yaml b/config/nodes.yaml
new file mode 100644
index 0000000..989afc5
--- /dev/null
+++ b/config/nodes.yaml
@@ -0,0 +1,24 @@
+#asrock01:
+#  bmc_user: include-rel://secrets/cimc-user
+#  bmc_password: include-rel://secrets/cimc-password
+#  power_type: manual
+#  bmc_address: 10.2.69.17
+#  bmc_power_boot_type: efi
+#  zone: asrock01
+#asrock02:
+#  bmc_user: include-rel://secrets/cimc-user
+#  bmc_password: include-rel://secrets/cimc-password
+#  power_type: manual
+#  bmc_address: 10.2.69.18
+#  bmc_power_boot_type: efi
+#  zone: asrock02
+#asrock03:
+#  bmc_user: include-rel://secrets/cimc-user
+#  bmc_password: include-rel://secrets/cimc-password
+#  power_type: manual
+#  bmc_address: 10.2.69.19
+#  bmc_power_boot_type: efi
+#  zone: asrock03
+as1-maas-node-01:
+  power_type: virsh
+  zone: asrock03
diff --git a/config/openstack.yaml b/config/openstack.yaml
new file mode 100644
index 0000000..dab74a5
--- /dev/null
+++ b/config/openstack.yaml
@@ -0,0 +1,21 @@
+tenant_name: ubuntu-net
+tenant_subnet_name: ubuntu-subnet
+tenant_cidr: 172.16.0.0/24
+tenant_gateway: 172.16.0.1
+tenant_start: 172.16.0.10
+tenant_end: 172.16.0.254
+ext_name: ext-net
+ext_subnet_name: ext-subnet
+ext_cidr: 10.0.0.0/24
+ext_gateway: 10.0.0.1
+ext_start: 10.0.0.2
+ext_end: 10.0.0.254
+router_name: ubuntu-router
+network_type: vlan
+segmentation_id: 1001
+physnet: physnet1
+keypair_file: /home/arif/.ssh/id_rsa.pub
+keypair_name: ubuntu-keypair
+image_series: bionic
+kvm_image_name: bionic-kvm
+lxd_image_name: bionic-lxd
diff --git a/config/overlays/contrail.yaml b/config/overlays/contrail.yaml
new file mode 100644
index 0000000..106cad8
--- /dev/null
+++ b/config/overlays/contrail.yaml
@@ -0,0 +1,264 @@
+---
+variables:
+
+  oam-space: &oam-space oam
+  public-space: &public-space oam
+
+  docker-registry: &docker-registry hub.juniper.net/contrail
+  docker-user: &docker-user include-file://../../secrets/juniper-username.txt
+  docker-password: &docker-password include-file://../../secrets/juniper-password.txt
+  #docker_runtime_repo: &docker-repo http://repo1.nci.bt.com/wes-505/current/mirror/download.docker.com/linux/ubuntu/
+  #docker_runtime_key_url: &docker-key http://repo1.nci.bt.com/wes-505/current/mirror/download.docker.com/linux/ubuntu/gpg
+
+  #image-tag: &image-tag 5.1.0-0.38-queens
+  #adastral image-tag: &image-tag 19.30-queens
+  # contrail version
+  image-tag: &image-tag "2011.138" # new LTS release (was 1912.L1.46)
+  #contrail-proxy: &contrail-proxy ""
+  #contrail-no-proxy: &contrail-no-proxy ""
+  # In old charms the same network was also used for API calls (contrail
+  # controller and api); the latest charms have a separate API network, so
+  # this could be split.
+ # contrail-control-net: &contrail-control-net 172.16.4.0/22 + # #contrail net now on OAM + contrail-control-net: &contrail-control-net 10.0.1.0/24 + # Data net on SDN transport + contrail-data-net: &contrail-data-net 10.0.6.0/24 + # api in external + contrail-api-vip: &contrail-api-vip 10.0.1.221 + # contrail log level (SYS_NOTICE or SYS_DEBUG) + contrail-log-level: &contrail-log-level SYS_DEBUG + +applications: +# Contrail applications + contrail-openstack: + charm: cs:~juniper-os-software/contrail-openstack + options: + docker-registry: *docker-registry + docker-user: *docker-user + docker-password: *docker-password + #docker_runtime_repo: *docker-repo + #docker_runtime_key_url: *docker-key + #temp change to apt until bug fixed around no_proxy + #https://github.com/Juniper/contrail-charms/issues/150 + #docker_runtime: apt + #docker_runtime: custom + image-tag: *image-tag + #https_proxy: *contrail-proxy + #http_proxy: *contrail-proxy + #no_proxy: *contrail-no-proxy + + contrail-agent: + charm: cs:~juniper-os-software/contrail-agent + options: + log-level: *contrail-log-level + docker-registry: *docker-registry + docker-user: *docker-user + docker-password: *docker-password + #docker_runtime_repo: *docker-repo + #docker_runtime_key_url: *docker-key + #temp change to apt until bug fixed around no_proxy + #https://github.com/Juniper/contrail-charms/issues/150 + #docker_runtime: apt + #docker_runtime: custom + image-tag: *image-tag + #https_proxy: *contrail-proxy + #http_proxy: *contrail-proxy + #no_proxy: *contrail-no-proxy + #physical-interface: ens8 + #vhost-gateway: auto + #sriov-physical-interface: enp94s0f0 + #sriov-numvfs: "12" + + contrail-analytics: + charm: cs:~juniper-os-software/contrail-analytics + num_units: 3 + bindings: + "": *oam-space + expose: true + options: + log-level: *contrail-log-level + docker-registry: *docker-registry + docker-user: *docker-user + docker-password: *docker-password + #docker_runtime_repo: *docker-repo + #docker_runtime_key_url: *docker-key + #temp change to apt until bug fixed around no_proxy + #https://github.com/Juniper/contrail-charms/issues/150 + #docker_runtime: apt + #docker_runtime: custom + image-tag: *image-tag + #https_proxy: *contrail-proxy + #http_proxy: *contrail-proxy + #no_proxy: *contrail-no-proxy + control-network: *contrail-control-net + haproxy-http-mode: "http" + min-cluster-size: 3 + # added to try to resolve issue with contrail-haproxy IJ Juniper case: 2020-0708-0220 + vip: *contrail-api-vip + to: + - 500 + - 501 + - 502 + + contrail-analytics-db: + charm: cs:~juniper-os-software/contrail-analyticsdb + num_units: 3 + bindings: + "": *oam-space + expose: true + options: + log-level: *contrail-log-level + docker-registry: *docker-registry + docker-user: *docker-user + docker-password: *docker-password + #docker_runtime_repo: *docker-repo + #docker_runtime_key_url: *docker-key + #temp change to apt until bug fixed around no_proxy + #https://github.com/Juniper/contrail-charms/issues/150 + #docker_runtime: apt + #docker_runtime: custom + image-tag: *image-tag + #https_proxy: *contrail-proxy + #http_proxy: *contrail-proxy + #no_proxy: *contrail-no-proxy + control-network: *contrail-control-net + cassandra-minimum-diskgb: "4" + cassandra-jvm-extra-opts: "-Xms2g -Xmx4g" + min-cluster-size: 3 + to: + - 503 + - 504 + - 505 + + keepalived: + charm: cs:~containers/keepalived + options: + virtual_ip: *contrail-api-vip + port: 8143 + + contrail-haproxy: + charm: cs:haproxy + num_units: 3 + bindings: + "": *oam-space + #changed: 
reverseproxy: *overlay-space + reverseproxy: *oam-space + website: *public-space + public: *public-space + options: + default_timeouts: >- + queue 60000, connect 5000, client 120000, server 120000 + services: "" + source: backports + peering_mode: "active-active" + enable_monitoring: True + ssl_cert: SELFSIGNED + to: + - lxd:500 + - lxd:501 + - lxd:502 + + contrail-controller: + charm: cs:~juniper-os-software/contrail-controller + num_units: 3 + bindings: + "": *oam-space + expose: true + options: + log-level: *contrail-log-level + docker-registry: *docker-registry + docker-user: *docker-user + docker-password: *docker-password + #docker_runtime_repo: *docker-repo + #docker_runtime_key_url: *docker-key + #temp change to apt until bug fixed around no_proxy + #https://github.com/Juniper/contrail-charms/issues/150 + #docker_runtime: apt + #docker_runtime: custom + image-tag: *image-tag + #https_proxy: *contrail-proxy + #http_proxy: *contrail-proxy + #no_proxy: *contrail-no-proxy + control-network: *contrail-control-net + #new data network as we are now splitting above function + #data-network: *contrail-data-net + auth-mode: rbac + cassandra-minimum-diskgb: "4" + cassandra-jvm-extra-opts: "-Xms1g -Xmx2g" + vip: *contrail-api-vip + #local-rabbitmq-hostname-resolution: True + haproxy-https-mode: tcp + haproxy-http-mode: http + bgp-asn: '65000' + min-cluster-size: 3 + to: + - 506 + - 507 + - 508 + + contrail-keystone-auth: + charm: cs:~juniper-os-software/contrail-keystone-auth + num_units: 3 + bindings: + "": *oam-space + to: + - lxd:503 + - lxd:504 + - lxd:505 + +relations: + - ["contrail-keystone-auth:identity-admin", "keystone:identity-admin"] + + - ["contrail-controller:contrail-auth", "contrail-keystone-auth:contrail-auth"] + - ["contrail-controller:contrail-analytics", "contrail-analytics:contrail-analytics"] + - ["contrail-controller:contrail-analyticsdb", "contrail-analytics-db:contrail-analyticsdb"] + - ["contrail-controller", "ntp"] + + - ["contrail-analytics:contrail-analyticsdb", "contrail-analytics-db:contrail-analyticsdb"] + - ["contrail-analytics", "ntp"] + + - ["contrail-analytics-db", "ntp"] + + - ["contrail-openstack:nova-compute", "nova-compute:neutron-plugin"] + - ["contrail-openstack:neutron-api", "neutron-api:neutron-plugin-api-subordinate"] + - ["contrail-openstack:heat-plugin", "heat:heat-plugin-subordinate"] + - ["contrail-openstack:contrail-controller", "contrail-controller:contrail-controller"] + + - ["contrail-agent:juju-info", "nova-compute:juju-info"] + - ["contrail-agent:contrail-controller", "contrail-controller:contrail-controller"] + - ["contrail-analytics:http-services", "contrail-haproxy:reverseproxy"] + - ["contrail-controller:http-services", "contrail-haproxy:reverseproxy"] + - ["contrail-controller:https-services", "contrail-haproxy:reverseproxy"] + - ["contrail-haproxy:juju-info", "keepalived:juju-info"] + + # added to support SSL on API + #- [ "contrail-agent:tls-certificates", "easyrsa:client" ] + #- [ "contrail-agent-dpdk:tls-certificates", "easyrsa:client" ] + #- [ "contrail-controller:tls-certificates", "easyrsa:client" ] + #- [ "contrail-analytics:tls-certificates", "easyrsa:client" ] + #- [ "contrail-analytics-db:tls-certificates", "easyrsa:client" ] + + - [ "contrail-controller:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + - [ "contrail-controller:juju-info", "telegraf:juju-info" ] + - [ "contrail-controller:juju-info", "filebeat:beats-host" ] + - [ "contrail-controller:juju-info", "landscape-client:container" ] + + - [ 
"contrail-analytics:juju-info", "telegraf:juju-info" ] + - [ "contrail-analytics:juju-info", "filebeat:beats-host" ] + - [ "contrail-analytics:juju-info", "landscape-client:container" ] + - [ "contrail-analytics:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + + - [ "contrail-analytics-db:juju-info", "telegraf:juju-info" ] + - [ "contrail-analytics-db:juju-info", "filebeat:beats-host" ] + - [ "contrail-analytics-db:juju-info", "landscape-client:container" ] + - [ "contrail-analytics-db:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + + - [ "contrail-haproxy:juju-info", "telegraf:juju-info" ] + - [ "contrail-haproxy:juju-info", "filebeat:beats-host" ] + - [ "contrail-haproxy:juju-info", "landscape-client:container" ] + - [ "contrail-haproxy:nrpe-external-master", "nrpe-container:nrpe-external-master" ] + + - [ "contrail-keystone-auth:juju-info", "telegraf:juju-info" ] + - [ "contrail-keystone-auth:juju-info", "filebeat:beats-host" ] + - [ "contrail-keystone-auth:juju-info", "landscape-client:container" ] + - [ "contrail-keystone-auth:nrpe-external-master", "nrpe-container:nrpe-external-master" ] diff --git a/config/overlays/contrail_versioned_overlay.yaml b/config/overlays/contrail_versioned_overlay.yaml new file mode 100644 index 0000000..457af6d --- /dev/null +++ b/config/overlays/contrail_versioned_overlay.yaml @@ -0,0 +1,21 @@ +applications: + contrail-agent: + charm: cs:~juniper-os-software/contrail-agent-22 + contrail-agent-dpdk: + charm: cs:~juniper-os-software/contrail-agent-22 + contrail-analytics: + charm: cs:~juniper-os-software/contrail-analytics-20 + contrail-analytics-db: + charm: cs:~juniper-os-software/contrail-analyticsdb-20 + contrail-controller: + charm: cs:~juniper-os-software/contrail-controller-21 + contrail-haproxy: + charm: cs:haproxy-55 + contrail-keystone-auth: + charm: cs:~juniper-os-software/contrail-keystone-auth-20 + contrail-openstack: + charm: cs:~juniper-os-software/contrail-openstack-23 + juniper-server: + charm: cs:ubuntu-15 + keepalived: + charm: cs:~containers/keepalived-28 diff --git a/config/overlays/hostnames.yaml b/config/overlays/hostnames.yaml new file mode 100644 index 0000000..6dda5b6 --- /dev/null +++ b/config/overlays/hostnames.yaml @@ -0,0 +1,59 @@ +--- +applications: + + aodh: + options: + os-public-hostname: aodh.example.com + os-internal-hostname: &aodh-int aodh-internal.example.com + os-admin-hostname: *aodh-int + ceilometer: + options: + os-public-hostname: ceilometer.example.com + os-internal-hostname: &ceilometer-int ceilometer-internal.example.com + os-admin-hostname: *ceilometer-int + cinder: + options: + os-public-hostname: cinder.example.com + os-internal-hostname: &cinder-int cinder-internal.example.com + os-admin-hostname: *cinder-int + openstack-dashboard: + options: + os-public-hostname: dashboard.example.com + glance: + options: + os-public-hostname: glance.example.com + os-internal-hostname: &glance-int glance-internal.example.com + os-admin-hostname: *glance-int + gnocchi: + options: + os-public-hostname: gnocchi.example.com + os-internal-hostname: &gnocchi-int gnocchi-internal.example.com + os-admin-hostname: *gnocchi-int + heat: + options: + os-public-hostname: heat.example.com + os-internal-hostname: &heat-int heat-internal.example.com + os-admin-hostname: *heat-int + keystone: + options: + os-public-hostname: keystone.example.com + os-internal-hostname: &keystone-int keystone-internal.example.com + os-admin-hostname: *keystone-int + neutron-api: + options: + os-public-hostname: 
neutron.example.com + os-internal-hostname: &neutron-int neutron-internal.example.com + os-admin-hostname: *neutron-int + nova-cloud-controller: + options: + os-public-hostname: nova.example.com + os-internal-hostname: &nova-int nova-internal.example.com + os-admin-hostname: *nova-int +# ceph-radosgw: +# options: +# os-public-hostname: swift.example.com +# os-internal-hostname: &swift-int swift-internal.example.com +# os-admin-hostname: *swift-int + vault: + options: + hostname: vault-internal.example.com diff --git a/config/overlays/ldap.yaml b/config/overlays/ldap.yaml new file mode 100644 index 0000000..8b4da0e --- /dev/null +++ b/config/overlays/ldap.yaml @@ -0,0 +1,106 @@ +--- + +applications: + ldap-domain1: + charm: cs:~openstack-charmers/ldap-test-fixture + bindings: + "": oam + num_units: 1 + to: + - lxd:100 + ldap-domain2: + charm: cs:~openstack-charmers/ldap-test-fixture + num_units: 1 + bindings: + "": oam + to: + - lxd:101 + ldap-domain3: + charm: cs:~openstack-charmers/ldap-test-fixture + num_units: 1 + bindings: + "": oam + to: + - lxd:102 + keystone-ldap-domain1: + charm: cs:keystone-ldap + options: + ldap-user: cn=admin,dc=test,dc=com + ldap-password: crapper + ldap-suffix: dc=test,dc=com + domain-name: domain1 + ldap-config-flags: >- + { + use_pool: true, + pool_size: 10, + pool_retry_max: 1, + user_tree_dn: "ou=users,dc=test,dc=com", + user_objectclass: "posixAccount", + user_id_attribute: uid, + user_name_attribute: uid, + user_attribute_ignore: userPassword, + query_scope: sub, + group_name_attribute: cn, + group_member_attribute: memberUid, + group_desc_attribute: description, + group_tree_dn: "ou=groups,dc=test,dc=com", + group_id_attribute: cn, + group_objectclass: "posixGroup", + group_members_are_ids: true, + } + keystone-ldap-domain2: + charm: cs:keystone-ldap + options: + ldap-user: cn=admin,dc=test,dc=com + ldap-password: crapper + ldap-suffix: dc=test,dc=com + domain-name: domain2 + ldap-config-flags: >- + { + use_pool: true, + pool_size: 10, + pool_retry_max: 1, + user_tree_dn: "ou=users,dc=test,dc=com", + user_objectclass: "posixAccount", + user_id_attribute: uid, + user_name_attribute: uid, + user_attribute_ignore: userPassword, + query_scope: sub, + group_name_attribute: cn, + group_member_attribute: member, + group_desc_attribute: description, + group_tree_dn: "ou=groups,dc=test,dc=com", + group_id_attribute: cn, + group_objectclass: "groupOfNames", + group_members_are_ids: false, + } + keystone-ldap-domain3: + charm: cs:keystone-ldap + options: + ldap-user: cn=admin,dc=test,dc=com + ldap-password: crapper + ldap-suffix: dc=test,dc=com + domain-name: domain3 + ldap-config-flags: >- + { + use_pool: true, + pool_size: 10, + pool_retry_max: 1, + user_tree_dn: "ou=users,dc=test,dc=com", + user_objectclass: "posixAccount", + user_id_attribute: uid, + user_name_attribute: uid, + user_attribute_ignore: userPassword, + query_scope: sub, + group_name_attribute: cn, + group_member_attribute: uniqueMember, + group_desc_attribute: description, + group_tree_dn: "ou=groups,dc=test,dc=com", + group_id_attribute: cn, + group_objectclass: "groupOfUniqueNames", + } + +relations: + - [ "keystone", "keystone-ldap-domain1" ] + - [ "keystone", "keystone-ldap-domain2" ] + - [ "keystone", "keystone-ldap-domain3" ] diff --git a/config/overlays/openstack_versioned_overlay.yaml b/config/overlays/openstack_versioned_overlay.yaml new file mode 100644 index 0000000..8637035 --- /dev/null +++ b/config/overlays/openstack_versioned_overlay.yaml @@ -0,0 +1,149 @@ +applications: + aodh: + 
charm: cs:aodh-35 +# bcache-tuning: +# charm: cs:bcache-tuning-2 +# canonical-livepatch: +# charm: cs:canonical-livepatch-34 + ceilometer: + charm: cs:ceilometer-268 + ceilometer-agent: + charm: cs:ceilometer-agent-258 + ceph-mon: + charm: cs:ceph-mon-44 + ceph-osd: + charm: cs:ceph-osd-294 +# ceph-osd2: +# charm: cs:ceph-osd-294 +# ceph-radosgw: +# charm: cs:ceph-radosgw-283 + cinder: + charm: cs:cinder-297 + cinder2: + charm: cs:cinder-297 + cinder-ceph: + charm: cs:cinder-ceph-251 + easyrsa: + charm: cs:~containers/easyrsa-296 + elasticsearch: + charm: cs:elasticsearch-39 + etcd: + charm: cs:etcd-488 +# external-policy-routing: +# charm: cs:~canonical-bootstack/policy-routing-3 + filebeat: + charm: cs:filebeat-29 + glance: + charm: cs:glance-292 + gnocchi: + charm: cs:gnocchi-32 + grafana: + charm: cs:~prometheus-charmers/grafana-38 + graylog: + charm: cs:graylog-49 + graylog-mongodb: + charm: cs:mongodb-53 + hacluster-aodh: + charm: cs:hacluster-63 + hacluster-cinder: + charm: cs:hacluster-63 + hacluster-cinder2: + charm: cs:hacluster-63 + hacluster-glance: + charm: cs:hacluster-63 + hacluster-gnocchi: + charm: cs:hacluster-63 + hacluster-heat: + charm: cs:hacluster-63 + hacluster-horizon: + charm: cs:hacluster-63 + hacluster-keystone: + charm: cs:hacluster-63 + hacluster-mysql: + charm: cs:hacluster-63 + hacluster-neutron: + charm: cs:hacluster-63 + hacluster-nova: + charm: cs:hacluster-63 +# hacluster-radosgw: +# charm: cs:hacluster-63 + hacluster-vault: + charm: cs:hacluster-63 + heat: + charm: cs:heat-271 + keystone-ldap-domain1: + charm: cs:keystone-ldap-23 + keystone-ldap-domain2: + charm: cs:keystone-ldap-23 + keystone-ldap-domain3: + charm: cs:keystone-ldap-23 + juniper-server: + charm: cs:ubuntu-15 + keystone: + charm: cs:keystone-309 + landscape-client: + charm: cs:landscape-client-32 + landscape-haproxy: + charm: cs:haproxy-55 + landscape-postgresql: + charm: cs:postgresql-199 + landscape-rabbitmq-server: + charm: cs:rabbitmq-server-97 + landscape-server: + charm: cs:landscape-server-38 +# lldpd: +# charm: cs:lldpd-0 + memcached: + charm: cs:memcached-26 + mysql: + charm: cs:percona-cluster-282 + nagios: + charm: cs:nagios-35 +# ncitest-ldap: +# charm: cs:keystone-ldap-23 + neutron-gateway: + charm: cs:neutron-gateway-276 + neutron-openvswitch: + charm: cs:neutron-openvswitch-269 + neutron-api: + charm: cs:neutron-api-282 + nova-cloud-controller: + charm: cs:nova-cloud-controller-340 + nova-compute: + charm: cs:nova-compute-311 +# nrpe-compute: +# charm: cs:nrpe-61 + nrpe-container: + charm: cs:nrpe-61 + nrpe-host: + charm: cs:nrpe-61 +# nrpe-kvm: +# charm: cs:nrpe-61 + ntp: + charm: cs:ntp-37 + openstack-dashboard: + charm: cs:openstack-dashboard-297 + openstack-service-checks: + charm: cs:~canonical-bootstack/openstack-service-checks-30 + prometheus: + charm: cs:prometheus2-12 + prometheus-ceph-exporter: + charm: cs:prometheus-ceph-exporter-5 + prometheus-openstack-exporter: + charm: cs:prometheus-openstack-exporter-10 + rabbitmq-server: + charm: cs:rabbitmq-server-97 + sysconfig-compute: + charm: cs:sysconfig-2 +# sysconfig-storage: +# charm: cs:sysconfig-2 + sysconfig-control: + charm: cs:sysconfig-2 + telegraf: + charm: cs:telegraf-30 + telegraf-prometheus: + charm: cs:telegraf-30 +# thruk-agent: +# charm: cs:thruk-agent-2 + vault: + charm: cs:vault-32 diff --git a/config/overlays/openstack_versioned_overlay_focal.yaml b/config/overlays/openstack_versioned_overlay_focal.yaml new file mode 100644 index 0000000..b031c3d --- /dev/null +++ 
b/config/overlays/openstack_versioned_overlay_focal.yaml @@ -0,0 +1,221 @@ +applications: + aodh: + charm: cs:aodh-48 + appformix-advanced-routing: + charm: cs:advanced-routing-5 + ccom-server: + charm: cs:ubuntu-18 + appformix-server: + charm: cs:ubuntu-18 + bcache-tuning: + charm: cs:bcache-tuning-6 + canonical-livepatch: + charm: cs:canonical-livepatch-42 + ceilometer: + charm: cs:ceilometer-282 + ceilometer-agent: + charm: cs:ceilometer-agent-271 + ceph-mon: + charm: cs:ceph-mon-55 + ceph-mon2: + charm: cs:ceph-mon-55 + ceph-osd: + charm: cs:ceph-osd-310 + ceph-osd2: + charm: cs:ceph-osd-310 + ceph-radosgw: + charm: cs:ceph-radosgw-296 + cinder: + charm: cs:cinder-310 + cinder-ceph: + charm: cs:cinder-ceph-262 # upgrade to support availability-zone specification + cinder-ceph2: + charm: cs:cinder-ceph-262 # upgrade to support availability-zone specification + cinder-infinidat: + charm: cs:~bt-charmers/cinder-infinidat-19 + contrail-agent: + charm: cs:~juniper-os-software/contrail-agent-27 + contrail-agent-dpdk: + charm: cs:~juniper-os-software/contrail-agent-27 + contrail-analytics: + charm: cs:~juniper-os-software/contrail-analytics-25 + contrail-analytics-db: + charm: cs:~juniper-os-software/contrail-analyticsdb-25 + contrail-controller: + charm: cs:~juniper-os-software/contrail-controller-27 + contrail-haproxy: + charm: cs:haproxy-61 + contrail-keystone-auth: + charm: cs:~juniper-os-software/contrail-keystone-auth-26 + contrail-openstack: + charm: cs:~juniper-os-software/contrail-openstack-28 + controller-server: + charm: cs:ubuntu-18 + easyrsa: + charm: cs:~containers/easyrsa-408 + elasticsearch: + charm: cs:elasticsearch-49 + etcd: + charm: cs:etcd-583 + external-advanced-routing: + charm: cs:advanced-routing-5 + filebeat: + charm: cs:filebeat-33 + glance: + charm: cs:glance-305 + gnocchi: + charm: cs:gnocchi-46 + grafana: + charm: cs:grafana-49 + graylog: + charm: cs:graylog-47 + graylog-mongodb: + charm: cs:mongodb-59 + hacluster-aodh: + charm: cs:hacluster-76 + hacluster-cinder: + charm: cs:hacluster-76 + hacluster-glance: + charm: cs:hacluster-76 + hacluster-gnocchi: + charm: cs:hacluster-76 + hacluster-heat: + charm: cs:hacluster-76 + hacluster-horizon: + charm: cs:hacluster-76 + hacluster-placement: + charm: cs:hacluster-76 + hacluster-keystone: + charm: cs:hacluster-76 + hacluster-manila: + charm: cs:hacluster-76 + hacluster-neutron: + charm: cs:hacluster-76 + hacluster-nova: + charm: cs:hacluster-76 + hacluster-radosgw: + charm: cs:hacluster-76 + hacluster-vault: + charm: cs:hacluster-76 + heat: + charm: cs:heat-283 + infinidat-tools: + charm: cs:~bt-charmers/infinidat-tools-8 + juniper-server: + charm: cs:ubuntu-18 + keepalived: + charm: cs:~containers/keepalived-98 + keystone: + charm: cs:keystone-323 + landscape-client: + charm: cs:landscape-client-35 + landscape-haproxy: + charm: cs:haproxy-61 + landscape-postgresql: + charm: cs:postgresql-233 + landscape-rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server-438 # attempted fix for LP#1939702 + landscape-server: + charm: cs:landscape-server-39 + lldpd: + charm: cs:lldpd-9 + manila: + charm: cs:manila-30 + manila-dashboard: + charm: cs:~openstack-charmers/manila-dashboard-2 + manila-infinidat: + charm: cs:~bt-charmers/manila-infinidat-14 + memcached: + charm: cs:memcached-32 + mysql-innodb-cluster: + charm: cs:mysql-innodb-cluster-11 + aodh-mysql-router: + charm: cs:mysql-router-11 + keystone-mysql-router: + charm: cs:mysql-router-11 + cinder-mysql-router: + charm: cs:mysql-router-11 + 
glance-mysql-router: + charm: cs:mysql-router-11 + gnocchi-mysql-router: + charm: cs:mysql-router-11 + heat-mysql-router: + charm: cs:mysql-router-11 + nova-cloud-controller-mysql-router: + charm: cs:mysql-router-11 + neutron-api-mysql-router: + charm: cs:mysql-router-11 + openstack-dashboard-mysql-router: + charm: cs:mysql-router-11 + placement-mysql-router: + charm: cs:mysql-router-11 + vault-mysql-router: + charm: cs:mysql-router-11 + manila-mysql-router: + charm: cs:mysql-router-11 + nagios: + charm: cs:nagios-44 + inf-ldap: + charm: cs:keystone-ldap-35 + int-ldap: + charm: cs:keystone-ldap-35 + neutron-api: + charm: cs:neutron-api-294 + placement: + charm: cs:placement-19 + nova-cloud-controller: + charm: cs:~openstack-charmers-next/nova-cloud-controller-549 # bug LP#1928992 Placement endpoints not being updated, or perhaps regressing to n-c-c endpoints, even after deploying placement service for train + nova-compute-kvm: + charm: cs:nova-compute-327 + nova-compute-kvm-dpdk: + charm: cs:nova-compute-327 + nrpe-compute-kvm: + charm: cs:nrpe-73 + nrpe-compute-kvm-dpdk: + charm: cs:nrpe-73 + nrpe-container: + charm: cs:nrpe-73 + #nrpe-contrail: + # charm: cs:nrpe-61 + nrpe-host: + charm: cs:nrpe-73 + nrpe-host-ceph-osd: + charm: cs:nrpe-73 + nrpe-kvm: + charm: cs:nrpe-73 + nrpe-kvm-appformix: + charm: cs:nrpe-73 + ntp: + charm: cs:ntp-46 + openstack-dashboard: + charm: cs:openstack-dashboard-313 + openstack-service-checks: + #charm: cs:openstack-service-checks-4 # this breaks SSL because of certifi bug 1924816 + charm: cs:~llama-charmers-next/openstack-service-checks-12 + prometheus: + charm: cs:prometheus2-22 + prometheus-ceph-exporter: + charm: cs:prometheus-ceph-exporter-13 + prometheus-openstack-exporter: + charm: cs:prometheus-openstack-exporter-17 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server-438 # attempted fix for LP#1939702 + #charm: cs:rabbitmq-server-110 + sysconfig-compute: + charm: cs:sysconfig-2 + sysconfig-compute-dpdk: + charm: cs:sysconfig-2 + sysconfig-control: + charm: cs:sysconfig-2 + sysconfig-storage: + charm: cs:sysconfig-2 + telegraf: + charm: cs:telegraf-41 + telegraf-appformix: + charm: cs:telegraf-41 + telegraf-prometheus: + charm: cs:telegraf-41 + thruk-agent: + charm: cs:thruk-agent-10 + vault: + charm: cs:vault-46 diff --git a/config/overlays/openstack_versioned_overlay_gemini.yaml b/config/overlays/openstack_versioned_overlay_gemini.yaml new file mode 100644 index 0000000..c1aa639 --- /dev/null +++ b/config/overlays/openstack_versioned_overlay_gemini.yaml @@ -0,0 +1,141 @@ +applications: + aodh: + charm: cs:aodh-27 +# bcache-tuning: +# charm: cs:~james-page/bcache-tuning-10 +# canonical-livepatch: +# charm: cs:canonical-livepatch-32 + ceilometer: + charm: cs:ceilometer-262 + ceilometer-agent: + charm: cs:ceilometer-agent-252 + ceph-mon: + charm: cs:ceph-mon-38 + ceph-osd: + charm: cs:ceph-osd-285 +# ceph-radosgw: +# charm: cs:ceph-radosgw-271 + cinder: + charm: cs:cinder-297 + cinder-ceph: + charm: cs:cinder-ceph-243 + easyrsa: + charm: cs:~containers/easyrsa-254 + elasticsearch: + charm: cs:elasticsearch-37 + etcd: + charm: cs:etcd-434 +# external-policy-routing: +# charm: cs:~canonical-bootstack/policy-routing-3 + filebeat: + charm: cs:filebeat-24 + glance: + charm: cs:glance-290 + gnocchi: + charm: cs:gnocchi-23 + grafana: + charm: cs:~prometheus-charmers/grafana-33 + graylog: + charm: cs:graylog-32 + graylog-mongodb: + charm: cs:mongodb-52 + hacluster-aodh: + charm: cs:hacluster-55 + hacluster-cinder: + charm: cs:hacluster-55 + 
hacluster-glance: + charm: cs:hacluster-55 + hacluster-gnocchi: + charm: cs:hacluster-55 + hacluster-heat: + charm: cs:hacluster-55 + hacluster-horizon: + charm: cs:hacluster-55 + hacluster-keystone: + charm: cs:hacluster-55 + hacluster-mysql: + charm: cs:hacluster-55 + hacluster-neutron: + charm: cs:hacluster-55 + hacluster-nova: + charm: cs:hacluster-55 +# hacluster-radosgw: +# charm: cs:hacluster-55 + hacluster-vault: + charm: cs:hacluster-55 + heat: + charm: cs:heat-263 + keepalived: + charm: cs:~containers/keepalived-28 + keystone-ldap-domain1: + charm: cs:keystone-ldap-18 + keystone-ldap-domain2: + charm: cs:keystone-ldap-18 + keystone-ldap-domain3: + charm: cs:keystone-ldap-18 + juniper-server: + charm: cs:ubuntu-15 + keystone: + charm: cs:keystone-309 + landscape-client: + charm: cs:landscape-client-32 + landscape-haproxy: + charm: cs:haproxy-55 + landscape-postgresql: + charm: cs:postgresql-199 + landscape-rabbitmq-server: + charm: cs:rabbitmq-server-89 + landscape-server: + charm: cs:landscape-server-33 +# lldpd: +# charm: cs:~ivoks/lldpd-5 + memcached: + charm: cs:memcached-23 + mysql: + charm: cs:percona-cluster-276 + nagios: + charm: cs:nagios-33 + neutron-gateway: + charm: cs:neutron-gateway-276 + neutron-openvswitch: + charm: cs:neutron-openvswitch-269 + neutron-api: + charm: cs:neutron-api-281 + nova-cloud-controller: + charm: cs:nova-cloud-controller-339 + nova-compute: + charm: cs:nova-compute-302 +# nrpe-compute: +# charm: cs:nrpe-58 + nrpe-container: + charm: cs:nrpe-58 + nrpe-host: + charm: cs:nrpe-58 +# nrpe-kvm: +# charm: cs:nrpe-58 + ntp: + charm: cs:ntp-32 + openstack-dashboard: + charm: cs:openstack-dashboard-288 + openstack-service-checks: + charm: cs:~canonical-bootstack/openstack-service-checks-22 + prometheus: + charm: cs:prometheus2-10 + prometheus-ceph-exporter: + charm: cs:prometheus-ceph-exporter-5 + prometheus-openstack-exporter: + charm: cs:prometheus-openstack-exporter-10 + rabbitmq-server: + charm: cs:rabbitmq-server-89 + sysconfig-compute: + charm: cs:sysconfig-1 + sysconfig-control: + charm: cs:sysconfig-1 + telegraf: + charm: cs:telegraf-29 + telegraf-prometheus: + charm: cs:telegraf-29 +# thruk-agent: +# charm: cs:thruk-agent-2 + vault: + charm: cs:vault-24 diff --git a/config/overlays/ovs.yaml b/config/overlays/ovs.yaml new file mode 100644 index 0000000..29fb4b3 --- /dev/null +++ b/config/overlays/ovs.yaml @@ -0,0 +1,22 @@ +--- +variables: + nova-default-filters: &nova-default-filters >- + RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFilter, + ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter, + ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,DifferentHostFilter, + SameHostFilter,AggregateInstanceExtraSpecsFilter,NUMATopologyFilter, + AggregateCoreFilter,DiskFilter + vlan-ranges: &vlan-ranges physnet1:350:599 + +applications: + nova-cloud-controller: + options: + scheduler-default-filters: *nova-default-filters + nova-compute: + options: + # AppArmor needs to be disabled: LP:1820302 + aa-profile-mode: disable + neutron-api: + options: + vlan-ranges: *vlan-ranges + diff --git a/config/overlays/resources.yaml b/config/overlays/resources.yaml new file mode 100644 index 0000000..4ccf468 --- /dev/null +++ b/config/overlays/resources.yaml @@ -0,0 +1,7 @@ + +applications: + keystone: + options: + use-policyd-override: true + resources: + policyd-override: ../resources/keystone.zip diff --git a/config/overlays/ssl.yaml b/config/overlays/ssl.yaml new file mode 100644 index 0000000..29da832 --- /dev/null +++ 
b/config/overlays/ssl.yaml @@ -0,0 +1,124 @@ +--- +variables: + ssl_ca: &ssl_ca >- + include-base64://../ssl/cacert.pem + ssl_crt: &ssl_crt >- + include-base64://../ssl/servercert.pem + ssl_key: &ssl_key >- + include-base64://../ssl/serverkey.pem + +applications: + aodh: + options: + ssl_ca: *ssl_ca + ssl_cert: *ssl_crt + ssl_key: *ssl_key + ceilometer: + options: + ssl_ca: *ssl_ca + cinder: + options: + ssl_ca: *ssl_ca + ssl_cert: *ssl_crt + ssl_key: *ssl_key + openstack-dashboard: + options: + ssl_ca: *ssl_ca + ssl_cert: *ssl_crt + ssl_key: *ssl_key + glance: + options: + ssl_ca: *ssl_ca + ssl_cert: *ssl_crt + ssl_key: *ssl_key + gnocchi: + options: + ssl_ca: *ssl_ca + ssl_cert: *ssl_crt + ssl_key: *ssl_key + heat: + options: + ssl_ca: *ssl_ca + ssl_cert: *ssl_crt + ssl_key: *ssl_key + keystone: + options: + ssl_ca: *ssl_ca + ssl_cert: *ssl_crt + ssl_key: *ssl_key + neutron-api: + options: + ssl_ca: *ssl_ca + ssl_cert: *ssl_crt + ssl_key: *ssl_key + nova-cloud-controller: + options: + ssl_ca: *ssl_ca + ssl_cert: *ssl_crt + ssl_key: *ssl_key + console-ssl-cert: *ssl_crt + console-ssl-key: *ssl_key +# ceph-radosgw: +# options: +# ssl_ca: *ssl_ca +# ssl_cert: *swift_crt +# ssl_key: *swift_key + openstack-service-checks: + options: + trusted_ssl_ca: *ssl_ca + prometheus-openstack-exporter: + options: + ssl_ca: *ssl_ca +# vault: +# options: +# ssl-ca: *ssl_ca +# ssl-cert: *ssl_crt +# ssl-key: *ssl_key + nagios: + options: + ssl_chain: *ssl_ca + ssl_cert: *ssl_crt + ssl_key: *ssl_key + ssl: 'on' + landscape-client: + options: + # this charm expects that the variable startswith base64: string. The rest is identical to the *ssl_ca value. + ssl-public-key: "base64:LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZqekNDQTNlZ0F3SUJBZ0lVWGdQbWZkdktY +YXVNbHNqTjhVeTJuTWdhamF3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd1Z6RUxNQWtHQTFVRUJoTUNS +MEl4RURBT0JnTlZCQWdNQjBWdVoyeGhibVF4RHpBTkJnTlZCQWNNQmt4dgpibVJ2YmpFVk1CTUdB +MVVFQ2d3TVZXSjFiblIxSUVOc2IzVmtNUTR3REFZRFZRUUxEQVZEYkc5MVpEQWVGdzB5Ck1UQXlN +VEV4TWpRNU1URmFGdzB5TWpBeU1URXhNalE1TVRGYU1GY3hDekFKQmdOVkJBWVRBa2RDTVJBd0Rn +WUQKVlFRSURBZEZibWRzWVc1a01ROHdEUVlEVlFRSERBWk1iMjVrYjI0eEZUQVRCZ05WQkFvTURG +VmlkVzUwZFNCRApiRzkxWkRFT01Bd0dBMVVFQ3d3RlEyeHZkV1F3Z2dJaU1BMEdDU3FHU0liM0RR +RUJBUVVBQTRJQ0R3QXdnZ0lLCkFvSUNBUURpV2tiK3luRnBDVXgxakxlaFM0SUl4MDBjTm51OU81 +eTRSbExxZFQ1TXltQzZFRVdBK0RvRnA5VEMKRW10R3ViUWdXNklSVHlJRi9hKytZNGFlRG0vU0NW +TEJ1OWZGZSt3WVdEbDU1L1F6SUJVMUhkWEsrdnBIRlB3SQpVQjRKQlZEUi9UWWZmWG5IMmJTVnMx +eE8xOVRwb2krZ0d4OW5UaEpsMVpyN2pNRkpXbmlXUG5FdEVuSG51Z3owCll0VkRUbEp3OHo2ZmVN +ME1GQlprcWZmZmxoTEpYMlA1OURQM2ZXV1lzcEFSM29TemFwb1JsbURQbHZnZDU0T28KZnJubFlK +cTdyWTRVTmdYYXpUNkZPQklHcDVOTVk1S0M1dXBpbVVEdzZlbTZubW10TjVuZUhkdTR3YlBsRVp6 +Nwp1a2o1cFFCREcrOHJpazJhSFB3VkFaOXNZaTNJUXdCc0lvL1pwMHpGeGhkbnNXTzltanNXWWpY +RkJaaFg4UzZzCmZZUFZlSXd1VEsrenZMbkxxZmpCUHBKTG11Um5HT0VhT1RhME9UUUtHZHZncDhX +eFJjc2Y2VXVhTFduUWRURkMKNjFzOE1jVG5GbXhhdWg0TmhmVVYvZURjTlRxU29pZHlDM0lUVmdR +eWJyTis4UW52cnlKcjNycjFVcDl1MzZDRwpJN3dqOHY3dE8vU0tId2ZPcHQ2UmhGNFBrWVRyNnQy +TmR3cURsWnBFMFBTOU5Wbk91S2dubzZmVkpKK1dvaEhxCktTM2Vnckw1elNRbTdDZ2JUTEJyR3NS +UnVoNitVRjVhOWtnZGpkVHVTT1BGYlBLa283djE3VndqQ0VYSlZJUlcKSEF6OW1nd0RVTTNjOU9Z +cEZ2TCs0ZUNVbWc3Mm82djh2NUpGd2JDMUNqdkdMZW5ON1FJREFRQUJvMU13VVRBZApCZ05WSFE0 +RUZnUVVNWlZ5NndacERTR1dZOTQxcm5CUzdJTzgxMVF3SHdZRFZSMGpCQmd3Rm9BVU1aVnk2d1pw +CkRTR1dZOTQxcm5CUzdJTzgxMVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBC +QVFzRkFBT0MKQWdFQU1TY0xMSFdRd1B3Y3YycTFrREkzdkg4bWNVU1ZFaG50VS9sVzNVeXh3eldi 
+VzZZVjJ5WFVtUmpKMlU1dApLRzc3RE5hUXF6T09HcEUwSjJuTDdlcHRoUW01SU1pNW5XNFFUa05C +YVVhOS9QQkk1UkxWZGZCRkZFT3NjQ3VtCnFqeEFQdWdYZHFJMWQreEFmZU9SNHcrY1E1UFNvVW1P +b3JXSzVZQ1BQaU1SVVJnVU50MGNZbjFGR0QzN2FIMUUKeng5RnN1U3lGekhBU1hMNCtYSFNYaHNB +MzJnRTA5SHIyUzRlbytSWUIwbnNreHlCWTBibG55c2d4QXFXMkFGSQo2aGhubTE5SldkZUdSUFBB +ZnRlWFRmNzlqcEF2Rmc2YWZmeWtaalZ4NDR1WSt5TCtNUTR3eEx2WHlrU2MxV3lJCkFwaG9HWkRW +WGxJZVorMVRLVnVXdnZPZlBveVJyYVRyeUtkRGtzNThzaWxBeWV4My9XTktidXRyRUN4V2Jvc0sK +LzkzWExRWnlVdmx0b2JncVpEWDF0Rk9NTDN3ZkdCZzl4dTczMTJ3NXhvN2c1WlFnT0lJaERkdzRw +OGlrekJHaQorZXdvNnhIblZEaE9UWGFCcmtuN3Rwbnh0NGRxQUdMdVJTRC9NV1crTTZXZThwNlI5 +TkRLWlBwdDdmVVk1NFlvCm9ISzlSYVMyeHZ3UGxWYjNqaHVKbkxDRWQ0eUdBQnEvcDhlWTZVV01P +TXBqbEFzT3FDN2pnSlRCc3hySnQvdnEKK3RYdDBZdVVTUW5seHBNV0hnVVdFcVMxU2t2R09JVjhZ +MTdGQXphVDRDV3VtSEJlUnFpNS83R2hKcUtSa05VZQo4NENDOWJQMkswN3ZkbGp1blNlR1dCbTJW +YXY3STdQQ0RoK0dQVmp2QnE1T2QvUT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + landscape-haproxy: + options: + ssl_cert: *ssl_crt + ssl_key: *ssl_key diff --git a/config/overlays/stsstack.yaml b/config/overlays/stsstack.yaml new file mode 100644 index 0000000..78f1fa4 --- /dev/null +++ b/config/overlays/stsstack.yaml @@ -0,0 +1,9 @@ +--- +applications: + neutron-api: + options: + flat-network-providers: physnet1 + enable-vlan-trunking: true + keystone: + options: + admin-password: openstack diff --git a/config/ssl/cacert.pem b/config/ssl/cacert.pem new file mode 100644 index 0000000..795d716 --- /dev/null +++ b/config/ssl/cacert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFjzCCA3egAwIBAgIUXgPmfdvKXauMlsjN8Uy2nMgajawwDQYJKoZIhvcNAQEL +BQAwVzELMAkGA1UEBhMCR0IxEDAOBgNVBAgMB0VuZ2xhbmQxDzANBgNVBAcMBkxv +bmRvbjEVMBMGA1UECgwMVWJ1bnR1IENsb3VkMQ4wDAYDVQQLDAVDbG91ZDAeFw0y +MTAyMTExMjQ5MTFaFw0yMjAyMTExMjQ5MTFaMFcxCzAJBgNVBAYTAkdCMRAwDgYD +VQQIDAdFbmdsYW5kMQ8wDQYDVQQHDAZMb25kb24xFTATBgNVBAoMDFVidW50dSBD +bG91ZDEOMAwGA1UECwwFQ2xvdWQwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQDiWkb+ynFpCUx1jLehS4IIx00cNnu9O5y4RlLqdT5MymC6EEWA+DoFp9TC +EmtGubQgW6IRTyIF/a++Y4aeDm/SCVLBu9fFe+wYWDl55/QzIBU1HdXK+vpHFPwI +UB4JBVDR/TYffXnH2bSVs1xO19Tpoi+gGx9nThJl1Zr7jMFJWniWPnEtEnHnugz0 +YtVDTlJw8z6feM0MFBZkqffflhLJX2P59DP3fWWYspAR3oSzapoRlmDPlvgd54Oo +frnlYJq7rY4UNgXazT6FOBIGp5NMY5KC5upimUDw6em6nmmtN5neHdu4wbPlEZz7 +ukj5pQBDG+8rik2aHPwVAZ9sYi3IQwBsIo/Zp0zFxhdnsWO9mjsWYjXFBZhX8S6s +fYPVeIwuTK+zvLnLqfjBPpJLmuRnGOEaOTa0OTQKGdvgp8WxRcsf6UuaLWnQdTFC +61s8McTnFmxauh4NhfUV/eDcNTqSoidyC3ITVgQybrN+8QnvryJr3rr1Up9u36CG +I7wj8v7tO/SKHwfOpt6RhF4PkYTr6t2NdwqDlZpE0PS9NVnOuKgno6fVJJ+WohHq +KS3egrL5zSQm7CgbTLBrGsRRuh6+UF5a9kgdjdTuSOPFbPKko7v17VwjCEXJVIRW +HAz9mgwDUM3c9OYpFvL+4eCUmg72o6v8v5JFwbC1CjvGLenN7QIDAQABo1MwUTAd +BgNVHQ4EFgQUMZVy6wZpDSGWY941rnBS7IO811QwHwYDVR0jBBgwFoAUMZVy6wZp +DSGWY941rnBS7IO811QwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC +AgEAMScLLHWQwPwcv2q1kDI3vH8mcUSVEhntU/lW3UyxwzWbW6YV2yXUmRjJ2U5t +KG77DNaQqzOOGpE0J2nL7epthQm5IMi5nW4QTkNBaUa9/PBI5RLVdfBFFEOscCum +qjxAPugXdqI1d+xAfeOR4w+cQ5PSoUmOorWK5YCPPiMRURgUNt0cYn1FGD37aH1E +zx9FsuSyFzHASXL4+XHSXhsA32gE09Hr2S4eo+RYB0nskxyBY0blnysgxAqW2AFI +6hhnm19JWdeGRPPAfteXTf79jpAvFg6affykZjVx44uY+yL+MQ4wxLvXykSc1WyI +AphoGZDVXlIeZ+1TKVuWvvOfPoyRraTryKdDks58silAyex3/WNKbutrECxWbosK +/93XLQZyUvltobgqZDX1tFOML3wfGBg9xu7312w5xo7g5ZQgOIIhDdw4p8ikzBGi ++ewo6xHnVDhOTXaBrkn7tpnxt4dqAGLuRSD/MWW+M6We8p6R9NDKZPpt7fUY54Yo +oHK9RaS2xvwPlVb3jhuJnLCEd4yGABq/p8eY6UWMOMpjlAsOqC7jgJTBsxrJt/vq ++tXt0YuUSQnlxpMWHgUWEqS1SkvGOIV8Y17FAzaT4CWumHBeRqi5/7GhJqKRkNUe +84CC9bP2K07vdljunSeGWBm2Vav7I7PCDh+GPVjvBq5Od/Q= +-----END CERTIFICATE----- diff --git 
a/config/ssl/copycerts.sh b/config/ssl/copycerts.sh new file mode 100755 index 0000000..c8d98ff --- /dev/null +++ b/config/ssl/copycerts.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +CERT_DIR=~/stsstack-bundles/openstack/ssl/openstack-ssl-test/results + +cp $CERT_DIR/servercert.pem . +cp $CERT_DIR/serverkey.pem . +cp $CERT_DIR/cacert.pem . diff --git a/config/ssl/privkey.pem b/config/ssl/privkey.pem new file mode 100644 index 0000000..8eccc14 --- /dev/null +++ b/config/ssl/privkey.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDYxKy1xtJoFBdL +LwniTgmfE0fhy3W/W07rf3sNSIADIy+yZjAtfCOTEbmUnOWA4iWTAwrnnJo/CcAz +4hKzHS8BG2RFPplKjNz2VxItfIZlO6WnaLJ9JSEhgrB3ipfbL7a0xWvazhsUWYfl +5SLvElYQRx12Q1z6K44P6NKFcQFvbhAL6aeWxeRD4Eg5mDD5nhHS6hYc96zBg0Qh +QW/vzusZCtwSBxujZKYREj8xdFQdjutF+Pp+w+axlR0IgWZ1bVMYyL3Mag7Fy8db +zryBZZgEORg89zThaF0cnrtqQeHA9vbsfxlS55Dqp26m40xU0TcP/7kqO3A0aUOx +bmp5H5qaQwO7uXxS3s4nD8n08ZDWC/M9vAb6gBvEuUzdNR9mWjiegJbjdiRhrkNM +7tzdX5oFG3LjWr5Ic+JPaHH2WKntuKdTTwAZz1Mw6KmX5hjxJuz8xkLUZ9n02C1d +v+HF6q+5OfUmX9vgjDC/7fm0yEVP7aEP7najcBq2QtuEtktFpr19ICVGWXa7NQM8 +OLPDsOMqwbw/nCa7QddV6+Jib/j+XYGlXhX2X8Hr4/+/rJO2d0vxCy1GHZmHBDHn +Ip4nu29AsSCfZH27LkAcLXHFYOBFBlRBdJPdzgX4K5f+abzjXTYN6vGVLWNrofez +33aAveDT5Tr4Hu9Foo3AzAuxOY8n6QIDAQABAoICABeFJ5RFsXjXjTt8LB9hBkw7 +ZFZMpEcSGjsVRv3G+xTcEYXS7rFQwe9oQfGtIH3ei+MqtlkxNc9XOIKoqgzDQuFk +3nlg9HMRW7xi8Db9Rvve//dtNci8aZ7gNlOSNy3yPGna7fv81xVbLZK1TRgRrkLv +5HN7lbATX8O8KHOQMm6ry5PvU17ZtA3lXRDawZ2kGB0Bh0q3WQhaPN864XFsyzeZ +ZQ2Ttd0Bw78gcrjj/WiDpGWgQVXV3ccw/ch5eeuN5DFRPr7aVyYBGW2NvghwRJFY +vJeI3zUCBPBhDMbXxS4R3fMFS3Miayf66Ne03Ahr9wSGY8oATwlwzoaHC4h+9GBL +eaDkW9pv8WUiNGl/XYF4CgrW5hdOGWn/vt7NzaloO2zfzVhdjjtRVQYB2YO5AMw4 +Zr8LozKpjhjDeG/0rCOxnYE6SBNmtOGvGA1X3SOqfNqLwC01kmT7bKJGDvSjXbX0 +suxg8HoVfaKJ4jTqU1mcdk/YqTKMvHEAOmJC7QK/xGOWXVJeA+uqgK2UgOZs7pOu +torSJtJWXdBrrx15oXq7axpEYObk06U/4307gpq0hjjnxuHTqv+TWM49VbA4VSHf +8qs4Det2d7/llk80iwyH/WG7KkO/WSFScAtHuSrddD0T8vnNy4mkVwd7LDAYLumc +LghK1R+qhNaxzy7TmATNAoIBAQDwUldJCjWe3tPoTmnC+oYfaN+R7ZPKeE1dyI7q +3E5xdmHksxPInq8HxNAG0kDVewugq7vwD7b1GyxkgMho3Encx2I6f0U6Wl8UmHx9 ++IT18upLi9OK/lfhjYWHJU4Xs3wMwmN1UOMOUO0lc2sIAy0QaUmMrf64QiALQkod +DsOFKVPne/MR20Emj6IKVOtD/k105PNj8B9tQv9Aekecslo8kI3njbchVZ59Ws2S +nesZhshKI9M7uK6DuXmlQT5AJq5H6KMQ4uSWRr81qSR2sAKI8MjplVeRoCifmpur +J/FetS+GFTDbIi1bthGinz8jTJSHyKoUQLX/QjcIzLLywMefAoIBAQDm6PbIMDRe +k88CtNTPp0947J4jusjN4IUILiL+P4WSHIW3yEz/1YQCZCjHFQfyUupQ8k10m/sz +p0PKnzoQoeLeYz3OE0GQPuYPw+f3CBEyAI0TVMuXSHV6Lyx5WVVcK7+FFHtsi9E/ +iA2NtXTApRFpHmtfn0VtU0ALMi17tMMMVVz/v3B2yzBiUgv47amFzmn63BELsYh0 +j3xzxDl4jmCPLnKrzpUUNgLAp7ag5WgJPlJVsN2arOyT5aHdjxzjvpMFJ0+2vtH4 +vGatCgNYfC6m77P/BjIUnxl4t1/x3dNDZ5z5ZbT4h3bxmEslIN4oK0b+avrQM7WP +MEcsn8agtIN3AoIBACGVAAl+2btXm/kMbA8I/xEIkKVNs6dJZFZEnmQHYRbN2iQh +OhSAyCinwgISA2lRhnBvTxXevExH/c4ViOvOTKwDWKCgBEaBNmQkYtEfl0TfKwpf +gbIclJ86LXyTI/R/6kfGKivMn83yVRE0rbIy72spq1NzRPLumpe4La8dzwiem9Dv +KypFnzcKPzR+ZVndAWniTHicp2eXQx/5dmRZL+7irFG9JegMlNcjhzKDysSIZxil +JPFlf01875i+IzrHrPbzdUX4zsSJIlMXTbcgJVqJeynY6qG45bRX4ITsUfhQDVIJ +SAh3ICt+p7w30JTYMLFAwzONT2FaGWKraB4v/jUCggEBANVIXZnvHJA3SsCfMupT +Cg8Rz8gE4TF4rqgtpVjeZ4vcGI72zliIaZSj2x54CI0FlTULegu5f7/oQa/IfyOZ +xCnR1Oah1q9bybjKJVqB8xASfjxa2Gp2HTyskhHMOBIYvqA3mQFb6/0YuBwqfRrr +3TcRR/Bc9w3SFZfB000uW/Lqbio0M7cmWxyV2EUnkKvtSurRllUMuJOhbTypBMSc +DghyyKx09jJi6Qv2XbQj8YKINBi+cxJJo4PAWVCCvhaGktnxw6lyfBOaQeSXGRwE +KDs9sS5gFKYDhs1/43lOgbMC43VhQlVFGtJw7wfuYgUMZFjfi6f/+zW1TCiltBwA +YGUCggEAToVBBOks7RQtSwdl6E4di5la05LFZMJrWxsPSOyVPENRrWeeq6idYSCj 
+v+fYz2wHHm4nc7FF4XIhXQN/Bv6nBwz60OuxMnsEc/4NFHYJWwKUkHuX7W/qbLsP +LKlvw2ywy/EIYqdcdyVryQ8s+sNZOYq5FDZ/0x5MgteVtIxd+KmMrtZDvqgZhNiq +VpFeIrHuav4cG45o3qY1e1IwFXZAvJzYxaZFpOIubwmSs4oD34nhNyJG5eswJOht +xz1tG8kDJFy3G/XrT9VN3inEaPT4y9qN8K03/cst32TOvD7nmInuBj6HsLnTLfX3 +20VoGTwfsCfcapPA3/SqCZw7QpavIg== +-----END PRIVATE KEY----- diff --git a/config/ssl/servercert.pem b/config/ssl/servercert.pem new file mode 100644 index 0000000..7f5c08a --- /dev/null +++ b/config/ssl/servercert.pem @@ -0,0 +1,130 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 15 (0xf) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=GB, ST=England, L=London, O=Ubuntu Cloud, OU=Cloud + Validity + Not Before: Feb 11 12:49:11 2021 GMT + Not After : Feb 11 12:49:11 2022 GMT + Subject: C=GB, ST=England, L=London, O=Ubuntu Cloud, OU=Cloud, CN=10.0.1.211 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public-Key: (2048 bit) + Modulus: + 00:c4:e4:ec:1a:64:c1:ec:15:05:72:47:82:ee:c6: + b1:12:a2:be:af:f1:52:d9:e5:97:52:35:11:b7:76: + da:53:54:99:60:87:28:d4:c4:55:70:c4:5a:c3:12: + 2c:62:9a:de:c9:92:30:a4:45:56:83:08:62:6e:18: + df:eb:ea:eb:c3:44:17:fe:6a:dc:c4:f6:98:be:10: + 7a:b4:f5:4d:9a:60:2f:6c:d1:40:e9:a6:57:79:f6: + c1:5f:7e:99:c4:86:12:a2:f0:7a:50:08:20:1e:a3: + 42:77:71:fa:2f:47:8b:26:30:91:cb:fa:55:38:fa: + 76:fe:15:4f:ec:d9:db:df:44:3c:e1:22:84:9f:6e: + cb:75:d2:0e:1d:93:c0:5a:83:ba:e8:13:e7:8f:13: + d9:e5:bc:52:04:d9:ea:68:62:e5:9d:00:11:42:ef: + 70:54:f9:69:02:e3:f7:46:6b:35:ce:2c:9a:80:93: + 7a:37:a5:4e:92:f1:b9:4a:47:13:f0:79:21:82:75: + 86:7b:91:ca:63:d7:60:23:66:26:35:c9:81:5b:d1: + 19:74:a0:e9:e9:72:c5:fb:18:55:f5:a1:ff:89:69: + 04:b1:36:81:f5:5b:a1:1d:b7:6c:c2:74:c9:04:50: + a4:c1:dd:be:14:6a:eb:cc:f6:17:5a:fb:95:39:53: + 46:71 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + CB:9B:67:C1:6B:30:93:13:FC:8A:BB:B0:CA:ED:CE:F4:F6:A2:95:65 + X509v3 Authority Key Identifier: + keyid:31:95:72:EB:06:69:0D:21:96:63:DE:35:AE:70:52:EC:83:BC:D7:54 + + X509v3 Basic Constraints: + CA:FALSE + X509v3 Key Usage: + Digital Signature, Key Encipherment + X509v3 Subject Alternative Name: + IP Address:10.0.1.211, IP Address:10.0.1.212, IP Address:10.0.1.213, IP Address:10.0.1.214, IP Address:10.0.1.215, IP Address:10.0.1.216, IP Address:10.0.1.217, IP Address:10.0.1.218, IP Address:10.0.1.219, IP Address:10.0.1.220, IP Address:10.0.1.221, IP Address:10.0.1.222, IP Address:10.0.1.223, IP Address:10.0.1.224, IP Address:10.0.1.225, IP Address:10.0.1.226, IP Address:10.0.1.227, IP Address:10.0.1.228, IP Address:10.0.1.229, IP Address:10.0.1.230, DNS:glance-internal.example.com, DNS:glance-public.example.com, DNS:glance.example.com, DNS:cinder-internal.example.com, DNS:cinder-public.example.com, DNS:cinder.example.com, DNS:nova-internal.example.com, DNS:nova-public.example.com, DNS:nova.example.com, DNS:keystone-internal.example.com, DNS:keystone-public.example.com, DNS:keystone.example.com, DNS:neutron-internal.example.com, DNS:neutron-public.example.com, DNS:neutron.example.com, DNS:horizon-internal.example.com, DNS:horizon-public.example.com, DNS:horizon.example.com, DNS:swift-internal.example.com, DNS:swift-public.example.com, DNS:swift.example.com, DNS:heat-internal.example.com, DNS:heat-public.example.com, DNS:heat.example.com, DNS:aodh-internal.example.com, DNS:aodh-public.example.com, DNS:aodh.example.com, DNS:ceilometer-internal.example.com, DNS:ceilometer-public.example.com, DNS:ceilometer.example.com, DNS:gnocchi-internal.example.com, 
DNS:gnocchi-public.example.com, DNS:gnocchi.example.com, DNS:nagios-internal.example.com, DNS:nagios-public.example.com, DNS:nagios.example.com, DNS:vault-int.example.com, DNS:vault-public.example.com, DNS:vault.example.com, DNS:landscape-internal.example.com, DNS:landscape-public.example.com, DNS:landscape.example.com + Netscape Comment: + OpenSSL Generated Certificate + Signature Algorithm: sha256WithRSAEncryption + 24:4f:fa:23:38:70:47:ca:67:a7:b3:df:60:d4:d4:e9:f1:2f: + 83:ca:94:41:cd:60:c2:31:ca:da:0c:1b:32:40:8d:ac:bd:05: + f6:29:39:fd:a3:77:12:76:8d:50:8d:bd:e0:f6:83:d4:1f:fa: + 96:f1:75:56:33:56:7b:9f:a6:c1:c5:5a:0e:28:fe:49:b0:ba: + 5a:56:4b:af:be:c1:6a:8d:78:35:90:d3:c5:69:91:19:61:0c: + 0c:5f:dd:cc:77:0b:6f:51:10:fe:06:cc:0e:f4:c2:65:c6:0b: + 61:2d:95:88:df:a7:9a:d2:9b:dd:96:04:f7:77:41:e2:2a:da: + 9b:a1:33:aa:de:ea:56:bb:78:d9:e5:dd:71:88:57:b2:d1:e8: + 8b:75:da:f8:dc:9c:8a:0b:a5:55:28:a3:4a:d1:a3:c4:06:4e: + b4:8e:e1:44:11:4b:04:5a:07:37:26:0c:2b:a5:03:bb:f6:15: + 8f:f3:e4:0f:a0:2a:b8:f7:c4:4d:e7:03:df:7c:58:0e:ca:67: + 2a:34:4b:5b:33:b2:b6:26:88:20:34:87:cd:fb:e5:27:7f:64: + 88:d5:f3:e8:6c:72:20:05:fb:bc:a1:0d:b7:d3:03:20:85:fb: + 88:ef:ce:a0:cd:8e:35:d9:14:3b:48:be:5c:46:8f:13:bd:53: + 04:93:51:d0:a7:a7:44:c2:81:9c:ff:70:ea:9f:07:73:31:e5: + 4a:e1:ad:2c:53:66:44:34:1e:e0:50:72:ad:28:67:00:2f:86: + c8:11:23:a8:a1:20:d0:b0:51:44:2b:eb:46:61:7f:fd:43:29: + da:d0:f1:8c:d1:b2:d8:6c:34:79:f8:b9:77:89:58:30:b6:00: + de:05:5e:94:f0:c4:d8:05:c0:f3:a9:d4:cf:8f:f3:4a:8a:dd: + 8c:bc:11:86:ae:d3:ec:e3:9a:ea:13:6a:db:2f:d2:53:84:3b: + b9:c5:98:23:d9:b6:4d:f5:c3:32:1f:6a:39:80:c4:66:b9:43: + 9e:9a:39:7d:08:12:ec:87:cb:38:d1:4c:93:0c:ce:d0:b2:0e: + db:a6:00:a2:99:c0:11:06:81:a9:1c:bf:d8:8c:7b:c2:71:3d: + 19:1a:61:c9:dd:ec:f9:44:ff:15:3a:1a:1f:d5:95:55:63:ee: + b2:35:01:81:83:89:b2:1a:a7:8d:5b:11:be:01:fc:3b:54:76: + ce:3b:6f:ae:6c:fc:b1:24:77:9e:4f:1a:82:02:20:0e:c5:24: + c4:5c:3f:23:1e:fe:b2:78:9b:0e:b1:91:4e:60:0f:26:ea:90: + 5d:09:bc:b4:ab:a9:e4:fa:2b:c3:d3:6c:d5:30:7a:e4:f2:eb: + c6:e7:fb:f4:dc:5c:e3:38 +-----BEGIN CERTIFICATE----- +MIIJgjCCB2qgAwIBAgIBDzANBgkqhkiG9w0BAQsFADBXMQswCQYDVQQGEwJHQjEQ +MA4GA1UECAwHRW5nbGFuZDEPMA0GA1UEBwwGTG9uZG9uMRUwEwYDVQQKDAxVYnVu +dHUgQ2xvdWQxDjAMBgNVBAsMBUNsb3VkMB4XDTIxMDIxMTEyNDkxMVoXDTIyMDIx +MTEyNDkxMVowbDELMAkGA1UEBhMCR0IxEDAOBgNVBAgMB0VuZ2xhbmQxDzANBgNV +BAcMBkxvbmRvbjEVMBMGA1UECgwMVWJ1bnR1IENsb3VkMQ4wDAYDVQQLDAVDbG91 +ZDETMBEGA1UEAwwKMTAuMC4xLjIxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAMTk7BpkwewVBXJHgu7GsRKivq/xUtnll1I1Ebd22lNUmWCHKNTEVXDE +WsMSLGKa3smSMKRFVoMIYm4Y3+vq68NEF/5q3MT2mL4QerT1TZpgL2zRQOmmV3n2 +wV9+mcSGEqLwelAIIB6jQndx+i9HiyYwkcv6VTj6dv4VT+zZ299EPOEihJ9uy3XS +Dh2TwFqDuugT548T2eW8UgTZ6mhi5Z0AEULvcFT5aQLj90ZrNc4smoCTejelTpLx +uUpHE/B5IYJ1hnuRymPXYCNmJjXJgVvRGXSg6elyxfsYVfWh/4lpBLE2gfVboR23 +bMJ0yQRQpMHdvhRq68z2F1r7lTlTRnECAwEAAaOCBUIwggU+MB0GA1UdDgQWBBTL +m2fBazCTE/yKu7DK7c709qKVZTAfBgNVHSMEGDAWgBQxlXLrBmkNIZZj3jWucFLs +g7zXVDAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDCCBLQGA1UdEQSCBKswggSnhwQK +AAHThwQKAAHUhwQKAAHVhwQKAAHWhwQKAAHXhwQKAAHYhwQKAAHZhwQKAAHahwQK +AAHbhwQKAAHchwQKAAHdhwQKAAHehwQKAAHfhwQKAAHghwQKAAHhhwQKAAHihwQK +AAHjhwQKAAHkhwQKAAHlhwQKAAHmghtnbGFuY2UtaW50ZXJuYWwuZXhhbXBsZS5j +b22CGWdsYW5jZS1wdWJsaWMuZXhhbXBsZS5jb22CEmdsYW5jZS5leGFtcGxlLmNv +bYIbY2luZGVyLWludGVybmFsLmV4YW1wbGUuY29tghljaW5kZXItcHVibGljLmV4 +YW1wbGUuY29tghJjaW5kZXIuZXhhbXBsZS5jb22CGW5vdmEtaW50ZXJuYWwuZXhh +bXBsZS5jb22CF25vdmEtcHVibGljLmV4YW1wbGUuY29tghBub3ZhLmV4YW1wbGUu +Y29tgh1rZXlzdG9uZS1pbnRlcm5hbC5leGFtcGxlLmNvbYIba2V5c3RvbmUtcHVi 
+bGljLmV4YW1wbGUuY29tghRrZXlzdG9uZS5leGFtcGxlLmNvbYIcbmV1dHJvbi1p +bnRlcm5hbC5leGFtcGxlLmNvbYIabmV1dHJvbi1wdWJsaWMuZXhhbXBsZS5jb22C +E25ldXRyb24uZXhhbXBsZS5jb22CHGhvcml6b24taW50ZXJuYWwuZXhhbXBsZS5j +b22CGmhvcml6b24tcHVibGljLmV4YW1wbGUuY29tghNob3Jpem9uLmV4YW1wbGUu +Y29tghpzd2lmdC1pbnRlcm5hbC5leGFtcGxlLmNvbYIYc3dpZnQtcHVibGljLmV4 +YW1wbGUuY29tghFzd2lmdC5leGFtcGxlLmNvbYIZaGVhdC1pbnRlcm5hbC5leGFt +cGxlLmNvbYIXaGVhdC1wdWJsaWMuZXhhbXBsZS5jb22CEGhlYXQuZXhhbXBsZS5j +b22CGWFvZGgtaW50ZXJuYWwuZXhhbXBsZS5jb22CF2FvZGgtcHVibGljLmV4YW1w +bGUuY29tghBhb2RoLmV4YW1wbGUuY29tgh9jZWlsb21ldGVyLWludGVybmFsLmV4 +YW1wbGUuY29tgh1jZWlsb21ldGVyLXB1YmxpYy5leGFtcGxlLmNvbYIWY2VpbG9t +ZXRlci5leGFtcGxlLmNvbYIcZ25vY2NoaS1pbnRlcm5hbC5leGFtcGxlLmNvbYIa +Z25vY2NoaS1wdWJsaWMuZXhhbXBsZS5jb22CE2dub2NjaGkuZXhhbXBsZS5jb22C +G25hZ2lvcy1pbnRlcm5hbC5leGFtcGxlLmNvbYIZbmFnaW9zLXB1YmxpYy5leGFt +cGxlLmNvbYISbmFnaW9zLmV4YW1wbGUuY29tghV2YXVsdC1pbnQuZXhhbXBsZS5j +b22CGHZhdWx0LXB1YmxpYy5leGFtcGxlLmNvbYIRdmF1bHQuZXhhbXBsZS5jb22C +HmxhbmRzY2FwZS1pbnRlcm5hbC5leGFtcGxlLmNvbYIcbGFuZHNjYXBlLXB1Ymxp +Yy5leGFtcGxlLmNvbYIVbGFuZHNjYXBlLmV4YW1wbGUuY29tMCwGCWCGSAGG+EIB +DQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTANBgkqhkiG9w0BAQsF +AAOCAgEAJE/6IzhwR8pnp7PfYNTU6fEvg8qUQc1gwjHK2gwbMkCNrL0F9ik5/aN3 +EnaNUI294PaD1B/6lvF1VjNWe5+mwcVaDij+SbC6WlZLr77Bao14NZDTxWmRGWEM +DF/dzHcLb1EQ/gbMDvTCZcYLYS2ViN+nmtKb3ZYE93dB4iram6Ezqt7qVrt42eXd +cYhXstHoi3Xa+NycigulVSijStGjxAZOtI7hRBFLBFoHNyYMK6UDu/YVj/PkD6Aq +uPfETecD33xYDspnKjRLWzOytiaIIDSHzfvlJ39kiNXz6GxyIAX7vKENt9MDIIX7 +iO/OoM2ONdkUO0i+XEaPE71TBJNR0KenRMKBnP9w6p8HczHlSuGtLFNmRDQe4FBy +rShnAC+GyBEjqKEg0LBRRCvrRmF//UMp2tDxjNGy2Gw0efi5d4lYMLYA3gVelPDE +2AXA86nUz4/zSordjLwRhq7T7OOa6hNq2y/SU4Q7ucWYI9m2TfXDMh9qOYDEZrlD +npo5fQgS7IfLONFMkwzO0LIO26YAopnAEQaBqRy/2Ix7wnE9GRphyd3s+UT/FToa +H9WVVWPusjUBgYOJshqnjVsRvgH8O1R2zjtvrmz8sSR3nk8aggIgDsUkxFw/Ix7+ +snibDrGRTmAPJuqQXQm8tKup5Porw9Ns1TB65PLrxuf79Nxc4zg= +-----END CERTIFICATE----- diff --git a/config/ssl/serverkey.pem b/config/ssl/serverkey.pem new file mode 100644 index 0000000..ec00053 --- /dev/null +++ b/config/ssl/serverkey.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDE5OwaZMHsFQVy +R4LuxrESor6v8VLZ5ZdSNRG3dtpTVJlghyjUxFVwxFrDEiximt7JkjCkRVaDCGJu +GN/r6uvDRBf+atzE9pi+EHq09U2aYC9s0UDppld59sFffpnEhhKi8HpQCCAeo0J3 +cfovR4smMJHL+lU4+nb+FU/s2dvfRDzhIoSfbst10g4dk8Bag7roE+ePE9nlvFIE +2epoYuWdABFC73BU+WkC4/dGazXOLJqAk3o3pU6S8blKRxPweSGCdYZ7kcpj12Aj +ZiY1yYFb0Rl0oOnpcsX7GFX1of+JaQSxNoH1W6Edt2zCdMkEUKTB3b4UauvM9hda ++5U5U0ZxAgMBAAECggEAZJWuKC2hA8IZbjAqK2FFxIdvcuKNuNMZYt3JlzgYgMmP +MUjbCxbhryIgW9dE5XrIpaERSFpDv9zgq+35NsRkfPSkbUyaS+TOUwqIJTnL0zmg +AK9FwhdASQZN80LxuvvjOWowkgIUppIwumR3sv+RjojxcosITntlnpe0pQMG2B9W +stEcU+N7jnWtZ9srY5y8ofxGNZ/ro25upFLHI/eE8tlTvWpSOdXS7vFvpa9VhVzo +ZwjcqpIAuW+JwRqeqshDGlgECsaPaAtX/+txIrHDFZCRXAB/GT2y9rpPEH/zjVT8 +TMmaxIanJcxWegdrvq1ZX+eS7luR9FwQfib08VkaIQKBgQDjOKeW6ewUu8CdKIxb +ZC4VYx9sfQsGDsoBDFMU/nGQSYnZT5kQoAL/98ixXiCunmuAkNfl1Uhk1i8zVsvL +lrA57D2KHnrbvCyY97VFFKGwZG6Z5XYRPkgzluR4+eycTkgCXNIgSeYOsGiEvHdl +MZASN7A+coVXO/bt7fUiiJvWDwKBgQDd1PPuV/tY0JHPLdoqEGRysjbpKdggN2Wr +LT6/do3czSTPmkmXhiqdH98H02DkX//hM3Bu+4uAAgDU+pro4gU8OjGi6Rb6Cxwg +k9asyBGRK5jD2iYNI9HXirMtN8ktDetpNJlriFYaIY1UP0ME9jkUHuB2ePwmZfKV +rmvI0QebfwKBgE0Tj83iheG2mqz07z+lKPi6ShOMCyw/4gge/SPW+ADg4TDlDmAU +V1Aq5lo8OsvhE0hmWcYt5kPGX/aDT9g8woSzfWCX3EcjeuFczZGkYQCr4NS1gKpy +vR065z+eT8PpzgV3JRQs3SxIbPvxznJ2MI/tcgyM3mxr++RT3t6bBnnDAoGAbwFw +a3W1fjUosc8VG/WF/ms11SmuUDjIdIc4niDaToKNiCGB8AQgcO4Q8l6RXta3Od8+ 
+xRq1LQJTnkAloqHv0rqgOhCAAfHrSlYQl/ep6sYxNNiGMA8bo8txbBA9aIFNqyC6 +IThhNcRSb7UUeXgpCeuFkWNrcl+Tq9lTcHNBZksCgYByy+cEi3OBrmPT0K24f7gv +0w249LbB0p4syQ2sC8uDhiz6NdFcB+7YvlZdCVAiSaO4X8AvnL6d1rGcAByS/wOF +Q79YMHq82iYtJnvGDrlgFoKHSYhUcmdN5idBLVWKm281NVIQKJMTlCMjvDdTOCKh +wnpnmlCJWeyRkanwX11K2A== +-----END PRIVATE KEY----- diff --git a/generated/.gitkeep b/generated/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/generated/maas/maas-api b/generated/maas/maas-api new file mode 100644 index 0000000..47fdb30 --- /dev/null +++ b/generated/maas/maas-api @@ -0,0 +1 @@ +QjzaY5ucvSSrydMKnz:aChrPQrVG4tfeAabrm:5ySUKY7c7EMhKhuKhpsXGzeaQLYpMAXx diff --git a/generated/maas/maas-pass b/generated/maas/maas-pass new file mode 100644 index 0000000..91705c0 --- /dev/null +++ b/generated/maas/maas-pass @@ -0,0 +1 @@ +openstack diff --git a/resources/keystone.yaml b/resources/keystone.yaml new file mode 100644 index 0000000..a30c646 --- /dev/null +++ b/resources/keystone.yaml @@ -0,0 +1,1070 @@ +# +#"admin_required": "role:admin or is_admin:1" + +# +#"service_role": "role:service" + +# +#"service_or_admin": "rule:admin_required or rule:service_role" + +# +#"owner": "user_id:%(user_id)s" + +# +#"admin_or_owner": "rule:admin_required or rule:owner" + +# +#"token_subject": "user_id:%(target.token.user_id)s" + +# +#"admin_or_token_subject": "rule:admin_required or rule:token_subject" + +# +#"service_admin_or_token_subject": "rule:service_or_admin or rule:token_subject" + +# Show application credential details. +# GET /v3/users/{user_id}/application_credentials/{application_credential_id} +# HEAD /v3/users/{user_id}/application_credentials/{application_credential_id} +#"identity:get_application_credential": "rule:admin_or_owner" + +# List application credentials for a user. +# GET /v3/users/{user_id}/application_credentials +# HEAD /v3/users/{user_id}/application_credentials +#"identity:list_application_credentials": "rule:admin_or_owner" + +# Create an application credential. +# POST /v3/users/{user_id}/application_credentials +#"identity:create_application_credential": "rule:admin_or_owner" + +# Delete an application credential. +# DELETE /v3/users/{user_id}/application_credentials/{application_credential_id} +#"identity:delete_application_credential": "rule:admin_or_owner" + +# Authorize OAUTH1 request token. +# PUT /v3/OS-OAUTH1/authorize/{request_token_id} +# Intended scope(s): project +#"identity:authorize_request_token": "rule:admin_required" + +# Get OAUTH1 access token for user by access token ID. +# GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} +# Intended scope(s): project +#"identity:get_access_token": "rule:admin_required" + +# Get role for user OAUTH1 access token. +# GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles/{role_id} +# Intended scope(s): project +#"identity:get_access_token_role": "rule:admin_required" + +# List OAUTH1 access tokens for user. +# GET /v3/users/{user_id}/OS-OAUTH1/access_tokens +# Intended scope(s): project +#"identity:list_access_tokens": "rule:admin_required" + +# List OAUTH1 access token roles. +# GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles +# Intended scope(s): project +#"identity:list_access_token_roles": "rule:admin_required" + +# Delete OAUTH1 access token. +# DELETE /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} +# Intended scope(s): project +#"identity:delete_access_token": "rule:admin_required" + +# Get service catalog. 
+# GET /v3/auth/catalog +# HEAD /v3/auth/catalog +#"identity:get_auth_catalog": "" + +# List all projects a user has access to via role assignments. +# GET /v3/auth/projects +# HEAD /v3/auth/projects +#"identity:get_auth_projects": "" + +# List all domains a user has access to via role assignments. +# GET /v3/auth/domains +# HEAD /v3/auth/domains +#"identity:get_auth_domains": "" + +# List systems a user has access to via role assignments. +# GET /v3/auth/system +# HEAD /v3/auth/system +#"identity:get_auth_system": "" + +# Show OAUTH1 consumer details. +# GET /v3/OS-OAUTH1/consumers/{consumer_id} +# Intended scope(s): system +#"identity:get_consumer": "rule:admin_required" + +# List OAUTH1 consumers. +# GET /v3/OS-OAUTH1/consumers +# Intended scope(s): system +#"identity:list_consumers": "rule:admin_required" + +# Create OAUTH1 consumer. +# POST /v3/OS-OAUTH1/consumers +# Intended scope(s): system +#"identity:create_consumer": "rule:admin_required" + +# Update OAUTH1 consumer. +# PATCH /v3/OS-OAUTH1/consumers/{consumer_id} +# Intended scope(s): system +#"identity:update_consumer": "rule:admin_required" + +# Delete OAUTH1 consumer. +# DELETE /v3/OS-OAUTH1/consumers/{consumer_id} +# Intended scope(s): system +#"identity:delete_consumer": "rule:admin_required" + +# Show credentials details. +# GET /v3/credentials/{credential_id} +#"identity:get_credential": "rule:admin_required" + +# List credentials. +# GET /v3/credentials +#"identity:list_credentials": "rule:admin_required" + +# Create credential. +# POST /v3/credentials +#"identity:create_credential": "rule:admin_required" + +# Update credential. +# PATCH /v3/credentials/{credential_id} +#"identity:update_credential": "rule:admin_required" + +# Delete credential. +# DELETE /v3/credentials/{credential_id} +#"identity:delete_credential": "rule:admin_required" + +# Show domain details. +# GET /v3/domains/{domain_id} +# Intended scope(s): system +#"identity:get_domain": "rule:admin_required or token.project.domain.id:%(target.domain.id)s" + +# List domains. +# GET /v3/domains +# Intended scope(s): system +#"identity:list_domains": "rule:admin_required" + +# Create domain. +# POST /v3/domains +# Intended scope(s): system +#"identity:create_domain": "rule:admin_required" + +# Update domain. +# PATCH /v3/domains/{domain_id} +# Intended scope(s): system +#"identity:update_domain": "rule:admin_required" + +# Delete domain. +# DELETE /v3/domains/{domain_id} +# Intended scope(s): system +#"identity:delete_domain": "rule:admin_required" + +# Create domain configuration. +# PUT /v3/domains/{domain_id}/config +# Intended scope(s): system +#"identity:create_domain_config": "rule:admin_required" + +# Get the entire domain configuration for a domain, an option group +# within a domain, or a specific configuration option within a group +# for a domain. +# GET /v3/domains/{domain_id}/config +# HEAD /v3/domains/{domain_id}/config +# GET /v3/domains/{domain_id}/config/{group} +# HEAD /v3/domains/{domain_id}/config/{group} +# GET /v3/domains/{domain_id}/config/{group}/{option} +# HEAD /v3/domains/{domain_id}/config/{group}/{option} +# Intended scope(s): system +#"identity:get_domain_config": "rule:admin_required" + +# Get security compliance domain configuration for either a domain or +# a specific option in a domain. 
+# GET /v3/domains/{domain_id}/config/security_compliance +# HEAD /v3/domains/{domain_id}/config/security_compliance +# GET /v3/domains/{domain_id}/config/security_compliance/{option} +# HEAD /v3/domains/{domain_id}/config/security_compliance/{option} +# Intended scope(s): system, project +#"identity:get_security_compliance_domain_config": "" + +# Update domain configuration for either a domain, specific group or a +# specific option in a group. +# PATCH /v3/domains/{domain_id}/config +# PATCH /v3/domains/{domain_id}/config/{group} +# PATCH /v3/domains/{domain_id}/config/{group}/{option} +# Intended scope(s): system +#"identity:update_domain_config": "rule:admin_required" + +# Delete domain configuration for either a domain, specific group or a +# specific option in a group. +# DELETE /v3/domains/{domain_id}/config +# DELETE /v3/domains/{domain_id}/config/{group} +# DELETE /v3/domains/{domain_id}/config/{group}/{option} +# Intended scope(s): system +#"identity:delete_domain_config": "rule:admin_required" + +# Get domain configuration default for either a domain, specific group +# or a specific option in a group. +# GET /v3/domains/config/default +# HEAD /v3/domains/config/default +# GET /v3/domains/config/{group}/default +# HEAD /v3/domains/config/{group}/default +# GET /v3/domains/config/{group}/{option}/default +# HEAD /v3/domains/config/{group}/{option}/default +# Intended scope(s): system +#"identity:get_domain_config_default": "rule:admin_required" + +# Show ec2 credential details. +# GET /v3/users/{user_id}/credentials/OS-EC2/{credential_id} +#"identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)" + +# List ec2 credentials. +# GET /v3/users/{user_id}/credentials/OS-EC2 +#"identity:ec2_list_credentials": "rule:admin_or_owner" + +# Create ec2 credential. +# POST /v3/users/{user_id}/credentials/OS-EC2 +#"identity:ec2_create_credential": "rule:admin_or_owner" + +# Delete ec2 credential. +# DELETE /v3/users/{user_id}/credentials/OS-EC2/{credential_id} +#"identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)" + +# Show endpoint details. +# GET /v3/endpoints/{endpoint_id} +# Intended scope(s): system +#"identity:get_endpoint": "rule:admin_required" + +# List endpoints. +# GET /v3/endpoints +# Intended scope(s): system +#"identity:list_endpoints": "rule:admin_required" + +# Create endpoint. +# POST /v3/endpoints +# Intended scope(s): system +#"identity:create_endpoint": "rule:admin_required" + +# Update endpoint. +# PATCH /v3/endpoints/{endpoint_id} +# Intended scope(s): system +#"identity:update_endpoint": "rule:admin_required" + +# Delete endpoint. +# DELETE /v3/endpoints/{endpoint_id} +# Intended scope(s): system +#"identity:delete_endpoint": "rule:admin_required" + +# Create endpoint group. +# POST /v3/OS-EP-FILTER/endpoint_groups +# Intended scope(s): system +#"identity:create_endpoint_group": "rule:admin_required" + +# List endpoint groups. +# GET /v3/OS-EP-FILTER/endpoint_groups +# Intended scope(s): system +#"identity:list_endpoint_groups": "rule:admin_required" + +# Get endpoint group. +# GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} +# HEAD /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} +# Intended scope(s): system +#"identity:get_endpoint_group": "rule:admin_required" + +# Update endpoint group.
+# PATCH /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} +# Intended scope(s): system +#"identity:update_endpoint_group": "rule:admin_required" + +# Delete endpoint group. +# DELETE /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} +# Intended scope(s): system +#"identity:delete_endpoint_group": "rule:admin_required" + +# List all projects associated with a specific endpoint group. +# GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects +# Intended scope(s): system +#"identity:list_projects_associated_with_endpoint_group": "rule:admin_required" + +# List all endpoints associated with an endpoint group. +# GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/endpoints +# Intended scope(s): system +#"identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required" + +# Check if an endpoint group is associated with a project. +# GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} +# HEAD /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} +# Intended scope(s): system +#"identity:get_endpoint_group_in_project": "rule:admin_required" + +# List endpoint groups associated with a specific project. +# GET /v3/OS-EP-FILTER/projects/{project_id}/endpoint_groups +# Intended scope(s): system +#"identity:list_endpoint_groups_for_project": "rule:admin_required" + +# Allow a project to access an endpoint group. +# PUT /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} +# Intended scope(s): system +#"identity:add_endpoint_group_to_project": "rule:admin_required" + +# Remove endpoint group from project. +# DELETE /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} +# Intended scope(s): system +#"identity:remove_endpoint_group_from_project": "rule:admin_required" + +# Check a role grant between a target and an actor. A target can be +# either a domain or a project. An actor can be either a user or a +# group. These terms also apply to the OS-INHERIT APIs, where grants +# on the target are inherited to all projects in the subtree, if +# applicable. +# HEAD /v3/projects/{project_id}/users/{user_id}/roles/{role_id} +# GET /v3/projects/{project_id}/users/{user_id}/roles/{role_id} +# HEAD /v3/projects/{project_id}/groups/{group_id}/roles/{role_id} +# GET /v3/projects/{project_id}/groups/{group_id}/roles/{role_id} +# HEAD /v3/domains/{domain_id}/users/{user_id}/roles/{role_id} +# GET /v3/domains/{domain_id}/users/{user_id}/roles/{role_id} +# HEAD /v3/domains/{domain_id}/groups/{group_id}/roles/{role_id} +# GET /v3/domains/{domain_id}/groups/{group_id}/roles/{role_id} +# HEAD /v3/OS-INHERIT/projects/{project_id}/users/{user_id}/roles/{role_id}/inherited_to_projects +# GET /v3/OS-INHERIT/projects/{project_id}/users/{user_id}/roles/{role_id}/inherited_to_projects +# HEAD /v3/OS-INHERIT/projects/{project_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects +# GET /v3/OS-INHERIT/projects/{project_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects +# HEAD /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/{role_id}/inherited_to_projects +# GET /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/{role_id}/inherited_to_projects +# HEAD /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects +# GET /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects +# Intended scope(s): system +#"identity:check_grant": "rule:admin_required" + +# List roles granted to an actor on a target. 
A target can be either a +# domain or a project. An actor can be either a user or a group. For +# the OS-INHERIT APIs, it is possible to list inherited role grants +# for actors on domains, where grants are inherited to all projects in +# the specified domain. +# GET /v3/projects/{project_id}/users/{user_id}/roles +# HEAD /v3/projects/{project_id}/users/{user_id}/roles +# GET /v3/projects/{project_id}/groups/{group_id}/roles +# HEAD /v3/projects/{project_id}/groups/{group_id}/roles +# GET /v3/domains/{domain_id}/users/{user_id}/roles +# HEAD /v3/domains/{domain_id}/users/{user_id}/roles +# GET /v3/domains/{domain_id}/groups/{group_id}/roles +# HEAD /v3/domains/{domain_id}/groups/{group_id}/roles +# GET /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/inherited_to_projects +# GET /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/inherited_to_projects +# Intended scope(s): system +#"identity:list_grants": "rule:admin_required" + +# Create a role grant between a target and an actor. A target can be +# either a domain or a project. An actor can be either a user or a +# group. These terms also apply to the OS-INHERIT APIs, where grants +# on the target are inherited to all projects in the subtree, if +# applicable. +# PUT /v3/projects/{project_id}/users/{user_id}/roles/{role_id} +# PUT /v3/projects/{project_id}/groups/{group_id}/roles/{role_id} +# PUT /v3/domains/{domain_id}/users/{user_id}/roles/{role_id} +# PUT /v3/domains/{domain_id}/groups/{group_id}/roles/{role_id} +# PUT /v3/OS-INHERIT/projects/{project_id}/users/{user_id}/roles/{role_id}/inherited_to_projects +# PUT /v3/OS-INHERIT/projects/{project_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects +# PUT /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/{role_id}/inherited_to_projects +# PUT /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects +# Intended scope(s): system +#"identity:create_grant": "rule:admin_required" + +# Revoke a role grant between a target and an actor. A target can be +# either a domain or a project. An actor can be either a user or a +# group. These terms also apply to the OS-INHERIT APIs, where grants +# on the target are inherited to all projects in the subtree, if +# applicable. In that case, revoking the role grant in the target +# would remove the logical effect of inheriting it to the target's +# projects subtree. +# DELETE /v3/projects/{project_id}/users/{user_id}/roles/{role_id} +# DELETE /v3/projects/{project_id}/groups/{group_id}/roles/{role_id} +# DELETE /v3/domains/{domain_id}/users/{user_id}/roles/{role_id} +# DELETE /v3/domains/{domain_id}/groups/{group_id}/roles/{role_id} +# DELETE /v3/OS-INHERIT/projects/{project_id}/users/{user_id}/roles/{role_id}/inherited_to_projects +# DELETE /v3/OS-INHERIT/projects/{project_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects +# DELETE /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/{role_id}/inherited_to_projects +# DELETE /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects +# Intended scope(s): system +#"identity:revoke_grant": "rule:admin_required" + +# List all grants a specific user has on the system. +# ['HEAD', 'GET'] /v3/system/users/{user_id}/roles +# Intended scope(s): system +#"identity:list_system_grants_for_user": "rule:admin_required" + +# Check if a user has a role on the system. 
+# ['HEAD', 'GET'] /v3/system/users/{user_id}/roles/{role_id} +# Intended scope(s): system +#"identity:check_system_grant_for_user": "rule:admin_required" + +# Grant a user a role on the system. +# ['PUT'] /v3/system/users/{user_id}/roles/{role_id} +# Intended scope(s): system +#"identity:create_system_grant_for_user": "rule:admin_required" + +# Remove a role from a user on the system. +# ['DELETE'] /v3/system/users/{user_id}/roles/{role_id} +# Intended scope(s): system +#"identity:revoke_system_grant_for_user": "rule:admin_required" + +# List all grants a specific group has on the system. +# ['HEAD', 'GET'] /v3/system/groups/{group_id}/roles +# Intended scope(s): system +#"identity:list_system_grants_for_group": "rule:admin_required" + +# Check if a group has a role on the system. +# ['HEAD', 'GET'] /v3/system/groups/{group_id}/roles/{role_id} +# Intended scope(s): system +#"identity:check_system_grant_for_group": "rule:admin_required" + +# Grant a group a role on the system. +# ['PUT'] /v3/system/groups/{group_id}/roles/{role_id} +# Intended scope(s): system +#"identity:create_system_grant_for_group": "rule:admin_required" + +# Remove a role from a group on the system. +# ['DELETE'] /v3/system/groups/{group_id}/roles/{role_id} +# Intended scope(s): system +#"identity:revoke_system_grant_for_group": "rule:admin_required" + +# Show group details. +# GET /v3/groups/{group_id} +# HEAD /v3/groups/{group_id} +# Intended scope(s): system +#"identity:get_group": "rule:admin_required" + +# List groups. +# GET /v3/groups +# HEAD /v3/groups +# Intended scope(s): system +#"identity:list_groups": "rule:admin_required" + +# List groups to which a user belongs. +# GET /v3/users/{user_id}/groups +# HEAD /v3/users/{user_id}/groups +# Intended scope(s): system +#"identity:list_groups_for_user": "rule:admin_or_owner" + +# Create group. +# POST /v3/groups +# Intended scope(s): system +#"identity:create_group": "rule:admin_required" + +# Update group. +# PATCH /v3/groups/{group_id} +# Intended scope(s): system +#"identity:update_group": "rule:admin_required" + +# Delete group. +# DELETE /v3/groups/{group_id} +# Intended scope(s): system +#"identity:delete_group": "rule:admin_required" + +# List members of a specific group. +# GET /v3/groups/{group_id}/users +# HEAD /v3/groups/{group_id}/users +# Intended scope(s): system +#"identity:list_users_in_group": "rule:admin_required" + +# Remove user from group. +# DELETE /v3/groups/{group_id}/users/{user_id} +# Intended scope(s): system +#"identity:remove_user_from_group": "rule:admin_required" + +# Check whether a user is a member of a group. +# HEAD /v3/groups/{group_id}/users/{user_id} +# GET /v3/groups/{group_id}/users/{user_id} +# Intended scope(s): system +#"identity:check_user_in_group": "rule:admin_required" + +# Add user to group. +# PUT /v3/groups/{group_id}/users/{user_id} +# Intended scope(s): system +#"identity:add_user_to_group": "rule:admin_required" + +# Create identity provider. +# PUT /v3/OS-FEDERATION/identity_providers/{idp_id} +# Intended scope(s): system +#"identity:create_identity_provider": "rule:admin_required" + +# List identity providers. +# GET /v3/OS-FEDERATION/identity_providers +# HEAD /v3/OS-FEDERATION/identity_providers +# Intended scope(s): system +#"identity:list_identity_providers": "rule:admin_required" + +# Get identity provider. 
+# GET /v3/OS-FEDERATION/identity_providers/{idp_id} +# HEAD /v3/OS-FEDERATION/identity_providers/{idp_id} +# Intended scope(s): system +#"identity:get_identity_provider": "rule:admin_required" + +# Update identity provider. +# PATCH /v3/OS-FEDERATION/identity_providers/{idp_id} +# Intended scope(s): system +#"identity:update_identity_provider": "rule:admin_required" + +# Delete identity provider. +# DELETE /v3/OS-FEDERATION/identity_providers/{idp_id} +# Intended scope(s): system +#"identity:delete_identity_provider": "rule:admin_required" + +# Get information about an association between two roles. When a +# relationship exists between a prior role and an implied role and the +# prior role is assigned to a user, the user also assumes the implied +# role. +# GET /v3/roles/{prior_role_id}/implies/{implied_role_id} +# Intended scope(s): system +#"identity:get_implied_role": "rule:admin_required" + +# List associations between two roles. When a relationship exists +# between a prior role and an implied role and the prior role is +# assigned to a user, the user also assumes the implied role. This +# will return all the implied roles that would be assumed by the user +# who gets the specified prior role. +# GET /v3/roles/{prior_role_id}/implies +# HEAD /v3/roles/{prior_role_id}/implies +# Intended scope(s): system +#"identity:list_implied_roles": "rule:admin_required" + +# Create an association between two roles. When a relationship exists +# between a prior role and an implied role and the prior role is +# assigned to a user, the user also assumes the implied role. +# PUT /v3/roles/{prior_role_id}/implies/{implied_role_id} +# Intended scope(s): system +#"identity:create_implied_role": "rule:admin_required" + +# Delete the association between two roles. When a relationship exists +# between a prior role and an implied role and the prior role is +# assigned to a user, the user also assumes the implied role. Removing +# the association will cause that effect to be eliminated. +# DELETE /v3/roles/{prior_role_id}/implies/{implied_role_id} +# Intended scope(s): system +#"identity:delete_implied_role": "rule:admin_required" + +# List all associations between two roles in the system. When a +# relationship exists between a prior role and an implied role and the +# prior role is assigned to a user, the user also assumes the implied +# role. +# GET /v3/role_inferences +# HEAD /v3/role_inferences +# Intended scope(s): system +#"identity:list_role_inference_rules": "rule:admin_required" + +# Check an association between two roles. When a relationship exists +# between a prior role and an implied role and the prior role is +# assigned to a user, the user also assumes the implied role. +# HEAD /v3/roles/{prior_role_id}/implies/{implied_role_id} +# Intended scope(s): system +#"identity:check_implied_role": "rule:admin_required" + +# Show limit details. +# GET /v3/limits/{limit_id} +# HEAD /v3/limits/{limit_id} +# Intended scope(s): system, project +#"identity:get_limit": "" + +# List limits. +# GET /v3/limits +# HEAD /v3/limits +# Intended scope(s): system, project +#"identity:list_limits": "" + +# Create limits. +# POST /v3/limits +# Intended scope(s): system +#"identity:create_limits": "rule:admin_required" + +# Update limits. +# PUT /v3/limits/{limit_id} +# Intended scope(s): system +#"identity:update_limits": "rule:admin_required" + +# Delete limit. 
+# DELETE /v3/limits/{limit_id} +# Intended scope(s): system +#"identity:delete_limit": "rule:admin_required" + +# Create a new federated mapping containing one or more sets of rules. +# PUT /v3/OS-FEDERATION/mappings/{mapping_id} +# Intended scope(s): system +#"identity:create_mapping": "rule:admin_required" + +# Get a federated mapping. +# GET /v3/OS-FEDERATION/mappings/{mapping_id} +# HEAD /v3/OS-FEDERATION/mappings/{mapping_id} +# Intended scope(s): system +#"identity:get_mapping": "rule:admin_required" + +# List federated mappings. +# GET /v3/OS-FEDERATION/mappings +# HEAD /v3/OS-FEDERATION/mappings +# Intended scope(s): system +#"identity:list_mappings": "rule:admin_required" + +# Delete a federated mapping. +# DELETE /v3/OS-FEDERATION/mappings/{mapping_id} +# Intended scope(s): system +#"identity:delete_mapping": "rule:admin_required" + +# Update a federated mapping. +# PATCH /v3/OS-FEDERATION/mappings/{mapping_id} +# Intended scope(s): system +#"identity:update_mapping": "rule:admin_required" + +# Show policy details. +# GET /v3/policy/{policy_id} +# Intended scope(s): system +#"identity:get_policy": "rule:admin_required" + +# List policies. +# GET /v3/policies +# Intended scope(s): system +#"identity:list_policies": "rule:admin_required" + +# Create policy. +# POST /v3/policies +# Intended scope(s): system +#"identity:create_policy": "rule:admin_required" + +# Update policy. +# PATCH /v3/policies/{policy_id} +# Intended scope(s): system +#"identity:update_policy": "rule:admin_required" + +# Delete policy. +# DELETE /v3/policies/{policy_id} +# Intended scope(s): system +#"identity:delete_policy": "rule:admin_required" + +# Associate a policy to a specific endpoint. +# PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id} +# Intended scope(s): system +#"identity:create_policy_association_for_endpoint": "rule:admin_required" + +# Check policy association for endpoint. +# GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id} +# HEAD /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id} +# Intended scope(s): system +#"identity:check_policy_association_for_endpoint": "rule:admin_required" + +# Delete policy association for endpoint. +# DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id} +# Intended scope(s): system +#"identity:delete_policy_association_for_endpoint": "rule:admin_required" + +# Associate a policy to a specific service. +# PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id} +# Intended scope(s): system +#"identity:create_policy_association_for_service": "rule:admin_required" + +# Check policy association for service. +# GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id} +# HEAD /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id} +# Intended scope(s): system +#"identity:check_policy_association_for_service": "rule:admin_required" + +# Delete policy association for service. +# DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id} +# Intended scope(s): system +#"identity:delete_policy_association_for_service": "rule:admin_required" + +# Associate a policy to a specific region and service combination. +# PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id} +# Intended scope(s): system +#"identity:create_policy_association_for_region_and_service": "rule:admin_required" + +# Check policy association for region and service. 
+# GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id} +# HEAD /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id} +# Intended scope(s): system +#"identity:check_policy_association_for_region_and_service": "rule:admin_required" + +# Delete policy association for region and service. +# DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id} +# Intended scope(s): system +#"identity:delete_policy_association_for_region_and_service": "rule:admin_required" + +# Get policy for endpoint. +# GET /v3/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/policy +# HEAD /v3/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/policy +# Intended scope(s): system +#"identity:get_policy_for_endpoint": "rule:admin_required" + +# List endpoints for policy. +# GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints +# Intended scope(s): system +#"identity:list_endpoints_for_policy": "rule:admin_required" + +# Show project details. +# GET /v3/projects/{project_id} +#"identity:get_project": "rule:admin_required or project_id:%(target.project.id)s" + +# List projects. +# GET /v3/projects +# Intended scope(s): system +#"identity:list_projects": "rule:admin_required" + +# List projects for user. +# GET /v3/users/{user_id}/projects +#"identity:list_user_projects": "rule:admin_or_owner" + +# Create project. +# POST /v3/projects +# Intended scope(s): system +#"identity:create_project": "rule:admin_required" + +# Update project. +# PATCH /v3/projects/{project_id} +# Intended scope(s): system +#"identity:update_project": "rule:admin_required" + +# Delete project. +# DELETE /v3/projects/{project_id} +# Intended scope(s): system +#"identity:delete_project": "rule:admin_required" + +# List tags for a project. +# GET /v3/projects/{project_id}/tags +# HEAD /v3/projects/{project_id}/tags +#"identity:list_project_tags": "rule:admin_required or project_id:%(target.project.id)s" + +# Check if project contains a tag. +# GET /v3/projects/{project_id}/tags/{value} +# HEAD /v3/projects/{project_id}/tags/{value} +#"identity:get_project_tag": "rule:admin_required or project_id:%(target.project.id)s" + +# Replace all tags on a project with the new set of tags. +# PUT /v3/projects/{project_id}/tags +# Intended scope(s): system +#"identity:update_project_tags": "rule:admin_required" + +# Add a single tag to a project. +# PUT /v3/projects/{project_id}/tags/{value} +# Intended scope(s): system +#"identity:create_project_tag": "rule:admin_required" + +# Remove all tags from a project. +# DELETE /v3/projects/{project_id}/tags +# Intended scope(s): system +#"identity:delete_project_tags": "rule:admin_required" + +# Delete a specified tag from project. +# DELETE /v3/projects/{project_id}/tags/{value} +# Intended scope(s): system +#"identity:delete_project_tag": "rule:admin_required" + +# List projects allowed to access an endpoint. +# GET /v3/OS-EP-FILTER/endpoints/{endpoint_id}/projects +# Intended scope(s): system +#"identity:list_projects_for_endpoint": "rule:admin_required" + +# Allow project to access an endpoint. +# PUT /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} +# Intended scope(s): system +#"identity:add_endpoint_to_project": "rule:admin_required" + +# Check if a project is allowed to access an endpoint. 
+# GET /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} +# HEAD /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} +# Intended scope(s): system +#"identity:check_endpoint_in_project": "rule:admin_required" + +# List the endpoints a project is allowed to access. +# GET /v3/OS-EP-FILTER/projects/{project_id}/endpoints +# Intended scope(s): system +#"identity:list_endpoints_for_project": "rule:admin_required" + +# Remove access to an endpoint from a project that has previously been +# given explicit access. +# DELETE /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} +# Intended scope(s): system +#"identity:remove_endpoint_from_project": "rule:admin_required" + +# Create federated protocol. +# PUT /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} +# Intended scope(s): system +#"identity:create_protocol": "rule:admin_required" + +# Update federated protocol. +# PATCH /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} +# Intended scope(s): system +#"identity:update_protocol": "rule:admin_required" + +# Get federated protocol. +# GET /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} +# Intended scope(s): system +#"identity:get_protocol": "rule:admin_required" + +# List federated protocols. +# GET /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols +# Intended scope(s): system +#"identity:list_protocols": "rule:admin_required" + +# Delete federated protocol. +# DELETE /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} +# Intended scope(s): system +#"identity:delete_protocol": "rule:admin_required" + +# Show region details. +# GET /v3/regions/{region_id} +# HEAD /v3/regions/{region_id} +# Intended scope(s): system, project +#"identity:get_region": "" + +# List regions. +# GET /v3/regions +# HEAD /v3/regions +# Intended scope(s): system, project +#"identity:list_regions": "" + +# Create region. +# POST /v3/regions +# PUT /v3/regions/{region_id} +# Intended scope(s): system +#"identity:create_region": "rule:admin_required" + +# Update region. +# PATCH /v3/regions/{region_id} +# Intended scope(s): system +#"identity:update_region": "rule:admin_required" + +# Delete region. +# DELETE /v3/regions/{region_id} +# Intended scope(s): system +#"identity:delete_region": "rule:admin_required" + +# Show registered limit details. +# GET /v3/registered_limits/{registered_limit_id} +# HEAD /v3/registered_limits/{registered_limit_id} +# Intended scope(s): system, project +#"identity:get_registered_limit": "" + +# List registered limits. +# GET /v3/registered_limits +# HEAD /v3/registered_limits +# Intended scope(s): system, project +#"identity:list_registered_limits": "" + +# Create registered limits. +# POST /v3/registered_limits +# Intended scope(s): system +#"identity:create_registered_limits": "rule:admin_required" + +# Update registered limits. +# PUT /v3/registered_limits/{registered_limit_id} +# Intended scope(s): system +#"identity:update_registered_limits": "rule:admin_required" + +# Delete registered limit. +# DELETE /v3/registered_limits/{registered_limit_id} +# Intended scope(s): system +#"identity:delete_registered_limit": "rule:admin_required" + +# List revocation events. +# GET /v3/OS-REVOKE/events +# Intended scope(s): system +#"identity:list_revoke_events": "rule:service_or_admin" + +# Show role details. +# GET /v3/roles/{role_id} +# HEAD /v3/roles/{role_id} +# Intended scope(s): system +#"identity:get_role": "rule:admin_required" + +# List roles. 
+# GET /v3/roles +# HEAD /v3/roles +# Intended scope(s): system +#"identity:list_roles": "rule:admin_required" + +# Create role. +# POST /v3/roles +# Intended scope(s): system +#"identity:create_role": "rule:admin_required" + +# Update role. +# PATCH /v3/roles/{role_id} +# Intended scope(s): system +#"identity:update_role": "rule:admin_required" + +# Delete role. +# DELETE /v3/roles/{role_id} +# Intended scope(s): system +#"identity:delete_role": "rule:admin_required" + +# Show domain role. +# GET /v3/roles/{role_id} +# HEAD /v3/roles/{role_id} +# Intended scope(s): system +#"identity:get_domain_role": "rule:admin_required" + +# List domain roles. +# GET /v3/roles?domain_id={domain_id} +# HEAD /v3/roles?domain_id={domain_id} +# Intended scope(s): system +#"identity:list_domain_roles": "rule:admin_required" + +# Create domain role. +# POST /v3/roles +# Intended scope(s): system +#"identity:create_domain_role": "rule:admin_required" + +# Update domain role. +# PATCH /v3/roles/{role_id} +# Intended scope(s): system +#"identity:update_domain_role": "rule:admin_required" + +# Delete domain role. +# DELETE /v3/roles/{role_id} +# Intended scope(s): system +#"identity:delete_domain_role": "rule:admin_required" + +# List role assignments. +# GET /v3/role_assignments +# HEAD /v3/role_assignments +# Intended scope(s): system +#"identity:list_role_assignments": "rule:admin_required" + +# List all role assignments for a given tree of hierarchical projects. +# GET /v3/role_assignments?include_subtree +# HEAD /v3/role_assignments?include_subtree +# Intended scope(s): project +#"identity:list_role_assignments_for_tree": "rule:admin_required" + +# Show service details. +# GET /v3/services/{service_id} +# Intended scope(s): system +#"identity:get_service": "rule:admin_required" + +# List services. +# GET /v3/services +# Intended scope(s): system +#"identity:list_services": "rule:admin_required" + +# Create service. +# POST /v3/services +# Intended scope(s): system +#"identity:create_service": "rule:admin_required" + +# Update service. +# PATCH /v3/services/{service_id} +# Intended scope(s): system +#"identity:update_service": "rule:admin_required" + +# Delete service. +# DELETE /v3/services/{service_id} +# Intended scope(s): system +#"identity:delete_service": "rule:admin_required" + +# Create federated service provider. +# PUT /v3/OS-FEDERATION/service_providers/{service_provider_id} +# Intended scope(s): system +#"identity:create_service_provider": "rule:admin_required" + +# List federated service providers. +# GET /v3/OS-FEDERATION/service_providers +# HEAD /v3/OS-FEDERATION/service_providers +# Intended scope(s): system +#"identity:list_service_providers": "rule:admin_required" + +# Get federated service provider. +# GET /v3/OS-FEDERATION/service_providers/{service_provider_id} +# HEAD /v3/OS-FEDERATION/service_providers/{service_provider_id} +# Intended scope(s): system +#"identity:get_service_provider": "rule:admin_required" + +# Update federated service provider. +# PATCH /v3/OS-FEDERATION/service_providers/{service_provider_id} +# Intended scope(s): system +#"identity:update_service_provider": "rule:admin_required" + +# Delete federated service provider. +# DELETE /v3/OS-FEDERATION/service_providers/{service_provider_id} +# Intended scope(s): system +#"identity:delete_service_provider": "rule:admin_required" + +# List revoked PKI tokens. +# GET /v3/auth/tokens/OS-PKI/revoked +# Intended scope(s): system, project +#"identity:revocation_list": "rule:service_or_admin" + +# Check a token. 
+# HEAD /v3/auth/tokens +#"identity:check_token": "rule:admin_or_token_subject" + +# Validate a token. +# GET /v3/auth/tokens +#"identity:validate_token": "rule:service_admin_or_token_subject" + +# Revoke a token. +# DELETE /v3/auth/tokens +#"identity:revoke_token": "rule:admin_or_token_subject" + +# Create trust. +# POST /v3/OS-TRUST/trusts +# Intended scope(s): project +#"identity:create_trust": "user_id:%(trust.trustor_user_id)s" + +# List trusts. +# GET /v3/OS-TRUST/trusts +# HEAD /v3/OS-TRUST/trusts +# Intended scope(s): project +#"identity:list_trusts": "" + +# List roles delegated by a trust. +# GET /v3/OS-TRUST/trusts/{trust_id}/roles +# HEAD /v3/OS-TRUST/trusts/{trust_id}/roles +# Intended scope(s): project +#"identity:list_roles_for_trust": "" + +# Check if trust delegates a particular role. +# GET /v3/OS-TRUST/trusts/{trust_id}/roles/{role_id} +# HEAD /v3/OS-TRUST/trusts/{trust_id}/roles/{role_id} +# Intended scope(s): project +#"identity:get_role_for_trust": "" + +# Revoke trust. +# DELETE /v3/OS-TRUST/trusts/{trust_id} +# Intended scope(s): project +#"identity:delete_trust": "" + +# Get trust. +# GET /v3/OS-TRUST/trusts/{trust_id} +# HEAD /v3/OS-TRUST/trusts/{trust_id} +# Intended scope(s): project +#"identity:get_trust": "" + +# Show user details. +# GET /v3/users/{user_id} +# HEAD /v3/users/{user_id} +#"identity:get_user": "rule:admin_or_owner" + +# List users. +# GET /v3/users +# HEAD /v3/users +# Intended scope(s): system +#"identity:list_users": "rule:admin_required" + +# List all projects a user has access to via role assignments. +# GET /v3/auth/projects +#"identity:list_projects_for_user": "" + +# List all domains a user has access to via role assignments. +# GET /v3/auth/domains +#"identity:list_domains_for_user": "" + +# Create a user. +# POST /v3/users +# Intended scope(s): system +#"identity:create_user": "rule:admin_required" + +# Update a user, including administrative password resets. +# PATCH /v3/users/{user_id} +# Intended scope(s): system +"identity:update_user": "rule:admin_or_owner" + +# Delete a user. 
+# DELETE /v3/users/{user_id} +# Intended scope(s): system +#"identity:delete_user": "rule:admin_required" diff --git a/resources/keystone.zip b/resources/keystone.zip new file mode 100644 index 0000000000000000000000000000000000000000..1a17f673dd3887dd206a4cb39ad18fdf9bcee017 GIT binary patch literal 4937 zcmZ|TuHazx zp~Oh|^vy_#`RYMk(}60q@%x2Hb#9H__lt*{wr_=!53%}ohF2ONmyC!`;JX8w$(_j_ z#Y>tkD};oT1D2l@mt~&}8sz51FEdXp9`3nKck41aNs{AX6amX@6pcwW%L~2r4(rs; zeHljYfK=OE1vF+wCXU)F#)`#?_1G)c~JK zaq}}SQ39{czi%;r!dtH}izm<9FD_L8D)_Rmn#3SXQTyAK`4&g21D6TbK0mgVFD`!_ zYlHbw>v4e!z~9Dtjljk>(x2Q;TxunB#5%N7VpHR$M;oDpSNB%Zjft{N?Am3$MKH%^Umr&JH`J*gE{hmmC-wgX1G@m&S;xXi$}{; z_Z^K-tNb=TeqhjYTuO=*##1>i!*wv;Ja{LFO(&9iRY*~&>L{ju( z*td1xq4vIV`PEVU6qNl4d!z#5M1fuTfq@__2j99pEn1dvVU%W^Foyk!79Slq`F!~^ z=&(ay{ZhW>Q)&w{^9f&IAO497$(uP+^m}<%8EJOX5?8wT%ixuEA`PK%b5Yj9p^Pn*jEVBm&xY%2BqL<&b{}RqXfx%fj&j7^xf}1p&@7kxnQKngIE=QzQU;4 z%WNF;Su5VRmenlMmy`{fwcfJ3)hu5F!Nfi9ORMRRrN@OvBERqTcm%P+yH`|LQFrr4 zBF}f8CXHu&K@SFVSQd#0qPQDC7(2U{E-eLg=DfmC)w}C7-TA0*rf9ve)fxA8p}IIz zx}{IghkptgMqnN!xkUas&qhr1$EWx{G8#-zGaSMS0&BBFaw|oSN-TcF+h&3I{)9u_ zyppxXu15n;qU1}PiH1Qot?>rh3Wd=l=^A>ZTv|J3{PH_Ue3ewR{kkTLqk@4YYQJ5M zBND#;a+bkQO1<0rYF#Jz8&-{C6K^rD>1#@u1j(|^A|h94^RR8>JzjC^S5+{jK7C{;vSV{QAjLwaDjuM zRv(Y2b)fkEonD1q$51=D6CAc4E+pc{E&L1RnkaeV*81Yh~y-4=Y_Erqk@tA7gUY_+G5vc z226N>70ndSy^ezXJ|TuUAH7$RFMBfsC4)nA2jxQRP5G9Dv$?9wYZB6VzJP;RC*rDE zUqA;5tv1S;j2NK)8ScjNCps_H+}*fEd=(9AtMRi^QQ5YRehuqY0Ia%uxMk#5 zk5E3J>VbEbPvUcKY!3_w$6uG3lTwpfmwH+W`zVu~yr5i5Vl$bIaVuF=g0+>pKbJ*SZt_syss!~cApK-|saM{g`>JYy9 z^N;G{mzNdAz}9zFfz`ftl6fShb;UJ!{B&w^3n#KZr4i= zhm}z?qZGmSC=tS`B7o1^2QQb{-0%e7v4pNc)?3kOsL@35`Rq|Ac!FCUx^#e!=$>g^ zqGsqXS;Ir|>9}{6s^Koc&kRPFz!a%KCR}p+U7ek+Br$eQv#g3=hD~uGo7Y;^SSKz) z7`X{%O@+i|k+C5dtTIzp1>1&2Tls(N! zb3DOfG?RVdxTCt~b_^Hle*0N6gNtbx5<@^%$N)$i)3xNIOe4M}&bZ8N76jNeN6mAa zVh4otZu7^`5+=1U%5-UEfP;sXf=@VojFVfJPZbD{+U|#?=?{Ju&jaI;(d;z0LpF61 z8=M>`4#N3}gT5n90{H5|PZRRMgvY{C6&i=~7><5MLv5b0@Enny2~tJo1S~oM)gIg1 zHWhI2h9T;Bv!Y^cmQW262dY^Vxl~&r#;5Vo#ZrJRp+(g7C23VZk`vawo2ng9!jZzG zaj3N~d0Yg-ohVL-vDLtfY$TC+JTT6-WzUPt7pf;?PsRs$`~fq7VMk&**1S=HTYv3` z$#LJ9boaDN^>%dM*K1jSuL6%4eEf8ZOBKJMHT@f$r)ROvIQ@%6w~}Wa^X$0TO15J3 zYu+J!MHA7wrp05zF%yVl?cQpSjtiS8Dv`BDBu%6tf%$nnbZDFT6}VDm=u?QFPaacm zZ6Xzz%L0+^T}eXQHA3-I_Uki}6g#CmBr;v2a-B3P?rAK&b%6x@T9poQ5SmNBS7J@4 z7W^4k`d8e8m4Vd>_Sk8=h5j}>ok*KL5ID}pT?1*|aSy;&Z%{-j&sat6L0XF7)Ye52 z6IRlqfj=ND2dt+}Yn{9oMHHHD;B!nW8*ghwRLx_p=ZRoS<~G89b~_DQR~wNYBjurN z7wu*oq_#g?PdEoK+IHBP-zhdX9zcjrR%X=Bf-^UKBEw##$GNl{)unhIL}y$EEw$Wh z_|uDPqVmn?OuM}*4bmwdGi$FL%pT#TF@xkjvU=)-H*ArhvCw;(kA;py_F5B0VhC^I zDL6~k$ZXA4(r+F_T#t4V1=KFg9ufp=Rz&jPcM0ce6P+LE*6i!(uay5zt3@1U*m@IZ z(P$Er>Q#orI$kgIQ{rJf8ztABI6mHaRXT1Um?%AIVM&SQSXC@1zja{U6o+`#qUP*) zg;D}E(aVCdg)COogb7jhy9zJRN1g?;#e64_!eKGu#C{X!WQl#-qhA67MWR*rRiQA! 
zn!#_K78V0fKd^`{96ztx(vJR1%Bzi2#?37xtF0&Uus!FrPO2V7$TbS!f=8q%K %!u>U;~ua-pF9{h@bDwoT}Vba6cwx-`VxQO09JQF6RqgRWaHTp@&N4YG4L<7Z1p_% 3eK;b8$)pp2{`4roNAaB^f3e%7{G$|ZiLB9@CMeAdvrCQ8DNmAW|5&jawg@GKl#tX+cT)96931{0L0_dh=$Yu9@d 8S03`mP9wdR%*31@(9$j5X1$(y_9QgAYd=293eAL1E?zFys9_71EstOC#yij8>*!U >D}8U!&m)XDkeIISo|~RM69v}hi6!BUC@*H*tp$q7a&d@-%RRx5R@fQfl?|9jXa{S Px#|$2U4}EqjG8rkvZ27O?$e3o>V`ix+-N%=1mJsL|qy`Wky{qH>$8Or?L-fW0y|O>}GEB=6?bE!ytep=R^OR0$!Gs`< `xJc|-K-@BQDk@r(#T0&O!)VwkM;Ie;)I)I^@O#R>mC2SGLd$jd<}|v)D@YdBl{mc i}*_^r`|t-$XmM{21S_4nO=-%0K`G_YuelGEPo*ZVG$nBXxxKuyzqe9^)UD`#ZQ4ztg8Ca)^ txRc-XuPh|{cpkF8oLMiN{)N_^7Q2%F;u?M +k#JoEt7$_ofzj#(7g5Tr{WPfaHXx{WY;)qf1V gl<5oq8OydLPUSA9>`53kfJx}#@QT0<+ZdhvnXWMh9;;pt#DMxk@bz|L%KiL#g!IE 4SvrWj|%_xuK!$<%#FvZy9FmmjG)5r4FZX_RNbuxvNdzRx4=K5+}IMRvvV%6+Enj8 F$VV75G;o$Xr&}4lviTUASK_1?UiH2`IZ(A1R-kpjf}z=y1%e{n&q{bhF_=K`Df{=U+6Pa8KwHa -5#yfjEH45brU9|z6JLoY&YD8_6_zEI56IBJ({)@>3@P%km!NBi>a=kcw7^y8Lh?+ (NuFTm~NsISYla}K+N=*PupmG+o`n}pp$$>W_Mf)1B_;K?dBs{bV-dcflW#RwdQK& 2i0Fu^ta#MzjQvDG!o!bNRv_DfShIpZ@j*76=;>8ynk=)F2PHY0s?tG_ hBC?vYSjNvxXAv=7XX0rFU=wU+5XGB(Erc7{>!>Fl+iK%bByw@wExYme_#dx{156u BNMry2 literal 0 HcmV?d00001 diff --git a/scripts/arif-scripts/99-post-setup.sh b/scripts/arif-scripts/99-post-setup.sh new file mode 100755 index 0000000..5fdf5ab --- /dev/null +++ b/scripts/arif-scripts/99-post-setup.sh @@ -0,0 +1,27 @@ +#!/bin/bash
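+ +# Post-deployment fix-ups: point each keystone-ldap-domainN application at +# its deployed LDAP unit, then add /etc/hosts entries so that units can +# resolve the Landscape, Keystone and Vault hostnames used in the SSL certs. +# The 10.0.1.x addresses below are hard-coded for this deployment.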
+ +juju_status_json=$(juju status --format json) + +# ldap +for i in `seq 1 3`; do + ldap_ip=$(echo $juju_status_json | jq .applications[\"ldap-domain${i}\"].units[][\"public-address\"] | sed s/\"//g) + juju config keystone-ldap-domain${i} ldap-server=ldap://${ldap_ip} +done + +# landscape +landscape_ip=$(echo $juju_status_json | jq .applications[\"landscape-haproxy\"].units[][\"public-address\"] | sed s/\"//g) + +juju run --all "echo ${landscape_ip} landscape.example.com | sudo tee -a /etc/hosts" + +# fix ceilometer, so that it can get to keystone +juju run --application ceilometer "echo 10.0.1.216 keystone.example.com | sudo tee -a /etc/hosts" +juju run-action ceilometer/0 ceilometer-upgrade + +# ensure openstack-service-checks can get to keystone +juju run --application openstack-service-checks "echo 10.0.1.216 keystone.example.com | sudo tee -a /etc/hosts" +juju run --application openstack-service-checks "echo 10.0.1.216 keystone-internal.example.com | sudo tee -a /etc/hosts" + +# ensure ceph-osd can get to vault +juju run --application ceph-osd "echo 10.0.1.222 vault.example.com | sudo tee -a /etc/hosts" +juju run --application ceph-osd "echo 10.0.1.222 vault-internal.example.com | sudo tee -a /etc/hosts" + diff --git a/scripts/arif-scripts/get_passwords.sh b/scripts/arif-scripts/get_passwords.sh new file mode 100755 index 0000000..310ce16 --- /dev/null +++ b/scripts/arif-scripts/get_passwords.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +keystone_passwd=$(juju run --unit keystone/leader 'leader-get admin_passwd') +nagios_passwd=$(juju run --unit nagios/leader 'sudo cat /var/lib/juju/nagios.passwd') +grafana_passwd=$(juju run-action --wait grafana/leader get-admin-password | grep password | awk '{print $2}') +graylog_passwd=$(juju run-action --wait graylog/leader show-admin-password | grep admin-password | awk '{print $2}') +mysql_passwd=$(juju run --unit mysql/leader 'leader-get root-password') + +echo "Keystone admin password: ... ${keystone_passwd}" +echo "Nagios password: ... ${nagios_passwd}" +echo "Grafana password: ... ${grafana_passwd}" +echo "Graylog password: ... ${graylog_passwd}" +echo "MySQL password: ... ${mysql_passwd}" diff --git a/scripts/arif-scripts/landscape-certs.sh b/scripts/arif-scripts/landscape-certs.sh new file mode 100755 index 0000000..6e5e6ec --- /dev/null +++ b/scripts/arif-scripts/landscape-certs.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# This is for when the landscape-haproxy cert is self-signed. This will ensure that Landscape still works +landscape_crt=$(juju run --application landscape-haproxy 'sudo openssl x509 -in /var/lib/haproxy/default.pem' | base64) +juju config landscape-client ssl-public-key="base64:${landscape_crt}" + +# And yes, this needs to use the IP address, otherwise the registration will fail +landscape_ip=$(juju run --application landscape-haproxy 'unit-get private-address') +juju config landscape-client url="https://${landscape_ip}/message-system" ping-url="http://${landscape_ip}/ping" + +# May need to restart all the landscape-clients +#juju run --application landscape-client 'sudo systemctl restart landscape-client.service' diff --git a/scripts/arif-scripts/ldap/nova_ldap.rc b/scripts/arif-scripts/ldap/nova_ldap.rc new file mode 100644 index 0000000..c408ed1 --- /dev/null +++ b/scripts/arif-scripts/ldap/nova_ldap.rc @@ -0,0 +1,9 @@ +export OS_AUTH_URL=http://keystone.example.com:5000/v3 +export OS_REGION_NAME=RegionOne +export OS_PROJECT_NAME=ldap1_proj1 +export OS_PROJECT_DOMAIN_NAME=domain1 +export OS_USER_DOMAIN_NAME=domain1 +export OS_IDENTITY_API_VERSION=3 +export OS_PASSWORD=crapper +export OS_USERNAME=johndoe + diff --git a/scripts/arif-scripts/ldap/nova_ldap_user20.rc b/scripts/arif-scripts/ldap/nova_ldap_user20.rc new file mode 100644 index 0000000..dbd0922 --- /dev/null +++ b/scripts/arif-scripts/ldap/nova_ldap_user20.rc @@ -0,0 +1,14 @@ +keystone_addr=`juju config keystone vip` +if [ -z "$keystone_addr" ]; then + keystone_addr=`jq -r '.applications.keystone.units."keystone/0"."public-address"' $juju_status_json_cache` +fi + +export OS_AUTH_URL=http://${keystone_addr}:5000/v3 +export OS_REGION_NAME=RegionOne +export OS_PROJECT_NAME=ldap_proj2 +export OS_PROJECT_DOMAIN_NAME=userdomain +export OS_USER_DOMAIN_NAME=userdomain +export OS_IDENTITY_API_VERSION=3 +export OS_PASSWORD=crapper +export OS_USERNAME=user20 + diff --git a/scripts/arif-scripts/reset_certs.sh b/scripts/arif-scripts/reset_certs.sh new file mode 100755 index 0000000..a534f62 --- /dev/null +++ b/scripts/arif-scripts/reset_certs.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +for proj in cinder glance heat keystone openstack-dashboard neutron-api nova-cloud-controller nagios landscape-haproxy; do + + juju config $proj --reset ssl_ca + juju config $proj --reset ssl_cert + juju config $proj --reset ssl_key + +done diff --git a/scripts/arif-scripts/update_landscape_certs.sh b/scripts/arif-scripts/update_landscape_certs.sh new file mode 100755 index 0000000..2501a02 --- /dev/null +++ b/scripts/arif-scripts/update_landscape_certs.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +TMPDIR=$(mktemp -d) + +DATE=$(date +%s) +lds_cert_pub=$TMPDIR/landscape_cert_${DATE}_pub.pem + +juju config landscape-haproxy ssl_cert=$(base64 -w 0 ../config/ssl/servercert.pem) ssl_key=$(base64 -w 0 ../config/ssl/serverkey.pem) + +cat ../config/ssl/servercert.pem | openssl x509 -pubkey -noout > ${lds_cert_pub} + +pub_key_base64=$(cat ${lds_cert_pub} | base64 -w 0) + +rm -f ${lds_cert_pub} + +juju config landscape-client ssl-public-key="base64:$pub_key_base64"
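+ +# May need to restart all the landscape-clients so the new key is picked up: +#juju run --application landscape-client 'sudo systemctl restart landscape-client.service' diff --git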
diff --git a/scripts/arif-scripts/update_landscape_certs_self.sh b/scripts/arif-scripts/update_landscape_certs_self.sh
new file mode 100755
index 0000000..50a55e8
--- /dev/null
+++ b/scripts/arif-scripts/update_landscape_certs_self.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# Use this when the landscape-haproxy certificate is SELF-SIGNED; it makes
+# the landscape clients trust the Landscape endpoint.
+landscape_crt=$(juju run --application landscape-haproxy 'sudo openssl x509 -in /var/lib/haproxy/default.pem' | base64 -w 0)
+juju config landscape-client ssl-public-key="base64:${landscape_crt}"
+
+# This must use the IP address; registration fails with the hostname.
+landscape_ip=$(juju run --application landscape-haproxy 'unit-get private-address')
+juju config landscape-client url="https://${landscape_ip}/message-system" ping-url="http://${landscape_ip}/ping"
+
+# May need to restart all the landscape clients:
+#juju run --application landscape-client 'sudo systemctl restart landscape-client.service'
+
diff --git a/scripts/arif-scripts/update_ldap.sh b/scripts/arif-scripts/update_ldap.sh
new file mode 100755
index 0000000..238de2b
--- /dev/null
+++ b/scripts/arif-scripts/update_ldap.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+juju_status_json=$(juju status --format json)
+
+for i in $(seq 1 3); do
+    ldap_ip=$(echo "$juju_status_json" | jq -r ".applications[\"ldap-domain${i}\"].units[][\"public-address\"]")
+    juju config keystone-ldap-domain${i} ldap-server=ldap://${ldap_ip}
+done
diff --git a/scripts/other-scripts/force_ip b/scripts/other-scripts/force_ip
new file mode 100755
index 0000000..640e3bc
--- /dev/null
+++ b/scripts/other-scripts/force_ip
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+export MACHINE_NAME=$1
+export STATIC_IP=$2
+export SUBNET_NAME=$3
+
+export SUBNET_ID=$(maas root subnets read | jq '.[] | {id:.id,name:.name}' --compact-output | grep $SUBNET_NAME | jq .id)
+export SYSTEM_ID=$(maas root nodes read | jq '.[] | {hostname:.hostname,system_id: .system_id, status:.status}' --compact-output | grep $MACHINE_NAME | jq .system_id | awk -F"\"" '{print $2}')
+export EXISTING_LINK_ID=$(maas root interfaces read ${SYSTEM_ID} | jq '.[] | .links[] | {link_id:.id, mode:.mode, subnet:.subnet.name}' --compact-output | grep $SUBNET_NAME | jq .link_id)
+export NIC_ID=$(maas root interfaces read ${SYSTEM_ID} | jq '.[] | {iface_id:.id, name:.name, mac:.mac_address, subnet:.subnet.name, link: .links}' --compact-output | grep ${EXISTING_LINK_ID} | jq .iface_id)
+
+maas root interface unlink-subnet ${SYSTEM_ID} ${NIC_ID} id=${EXISTING_LINK_ID}
+maas root interface link-subnet ${SYSTEM_ID} ${NIC_ID} mode=STATIC subnet=${SUBNET_ID} ip_address=${STATIC_IP}
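+
+# Example invocation (machine and subnet names are hypothetical; the MAAS
+# profile is assumed to be "root", as above):
+#   ./force_ip as1-maas-node-02 10.0.1.50 oam-subnet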
{}".format(link_id, nic_id, system_id, hostname)) + subprocess.check_call("maas root interface unlink-subnet {} {} id={}".format(system_id, nic_id, link_id), shell=True) + break + diff --git a/scripts/other-scripts/vault.sh b/scripts/other-scripts/vault.sh new file mode 100755 index 0000000..d95ec9d --- /dev/null +++ b/scripts/other-scripts/vault.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +vault_vip=$(juju config vault vip) +echo export VAULT_ADDR="http://${vault_vip}:8200" +export VAULT_ADDR="http://${vault_vip}:8200" + +echo " " + +IPS=$(juju status vault --format json | jq '.applications.vault.units | to_entries[] | .value."public-address"' | sed s/\"//g) + +for ip in $IPS;do + echo export VAULT_ADDR=http://${ip}:8200; + export VAULT_ADDR=http://${ip}:8200; + for vault_key in $(head -n3 ../../secrets/vault.txt | awk '{print $4}');do + echo vault operator unseal -tls-skip-verify $vault_key + vault operator unseal -tls-skip-verify $vault_key + done; +done; + + diff --git a/scripts/post-deployment/01-create-flavors.sh b/scripts/post-deployment/01-create-flavors.sh new file mode 100755 index 0000000..dffc700 --- /dev/null +++ b/scripts/post-deployment/01-create-flavors.sh @@ -0,0 +1,45 @@ +#!/bin/bash + + +# The following flavors will be configured initially in the cloud. Note that +# these flavors will need to be configured per host aggregate, multiplying the +# list below by the host aggregates listed in the previous section. When +# combined with the host aggregate, this maps to 24 combinations which need +# to be defined, for brevity and clarity these combinations are not +# individually listed here. + +# Name vCPU RAM (MB) Disk (GB) Disk Type +# m1.small 1 2048 20 Ceph +# m1.medium 2 4096 40 Ceph +# m1.large 4 8192 80 Ceph +# m1.xlarge 8 16384 160 Ceph + +HOST_AGGREGATES=( + "default" +) + +for host_aggregate in ${HOST_AGGREGATES[*]}; do + # m1.small.ceph 1 2048 20 Ceph + openstack flavor create \ + --vcpus 1 --ram 2048 --disk 20 \ + --property aggregate_instance_extra_specs:host_aggregate=${host_aggregate} \ + ${host_aggregate}.m1.small + + # m1.medium.ceph 2 4096 40 Ceph + openstack flavor create \ + --vcpus 2 --ram 4096 --disk 40 \ + --property aggregate_instance_extra_specs:host_aggregate=${host_aggregate} \ + ${host_aggregate}.m1.medium + + # m1.large.ceph 4 8192 80 Ceph + openstack flavor create \ + --vcpus 4 --ram 8192 --disk 80 \ + --property aggregate_instance_extra_specs:host_aggregate=${host_aggregate} \ + ${host_aggregate}.m1.large + + # m1.xlarge.ceph 8 16384 160 Ceph + openstack flavor create \ + --vcpus 8 --ram 16384 --disk 160 \ + --property aggregate_instance_extra_specs:host_aggregate=${host_aggregate} \ + ${host_aggregate}.m1.xlarge +done diff --git a/scripts/post-deployment/02-create-host-types.sh b/scripts/post-deployment/02-create-host-types.sh new file mode 100755 index 0000000..121d167 --- /dev/null +++ b/scripts/post-deployment/02-create-host-types.sh @@ -0,0 +1,27 @@ +#!/bin/bash + + +# Name Description Compute Nodes +# default 2:1 CPU 04,05,06 + +declare -A COMPUTE_NODES +COMPUTE_NODES=( + ["default"]="04 05 06" +) + +NODE_NAME_PREFIX="node0" + +for aggregate in "${!COMPUTE_NODES[@]}"; do + # Create aggregate + echo "Creating host aggregate ${aggregate}..." + openstack aggregate create \ + --property host_aggregate=${aggregate} \ + ${aggregate} + + # Add COMPUTE_NODES to the host aggregate + for node in ${COMPUTE_NODES[$aggregate]}; do + echo "Adding node ${NODE_NAME_PREFIX}${node} to host aggregate ${aggregate}..." 
diff --git a/scripts/post-deployment/02-create-host-types.sh b/scripts/post-deployment/02-create-host-types.sh
new file mode 100755
index 0000000..121d167
--- /dev/null
+++ b/scripts/post-deployment/02-create-host-types.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+
+# Name      Description   Compute Nodes
+# default   2:1 CPU       04,05,06
+
+declare -A COMPUTE_NODES
+COMPUTE_NODES=(
+    ["default"]="04 05 06"
+)
+
+NODE_NAME_PREFIX="node0"
+
+for aggregate in "${!COMPUTE_NODES[@]}"; do
+    # Create the host aggregate
+    echo "Creating host aggregate ${aggregate}..."
+    openstack aggregate create \
+        --property host_aggregate=${aggregate} \
+        ${aggregate}
+
+    # Add the aggregate's compute nodes to it
+    for node in ${COMPUTE_NODES[$aggregate]}; do
+        echo "Adding node ${NODE_NAME_PREFIX}${node} to host aggregate ${aggregate}..."
+        openstack aggregate add host ${aggregate} ${NODE_NAME_PREFIX}${node}
+    done
+done
+
diff --git a/scripts/post-deployment/03-set-availability-zones.sh b/scripts/post-deployment/03-set-availability-zones.sh
new file mode 100755
index 0000000..720f463
--- /dev/null
+++ b/scripts/post-deployment/03-set-availability-zones.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+
+# Name       Description    Compute Nodes
+# asrock01   Nodes in AZ1   02 03
+# asrock02   Nodes in AZ2   02 03
+# asrock03   Nodes in AZ3   02 03
+
+declare -A COMPUTE_NODES
+COMPUTE_NODES=(
+    # Host aggregate name: compute nodes
+    ["asrock01"]="02 03"
+    ["asrock02"]="02 03"
+    ["asrock03"]="02 03"
+)
+
+declare -A AVAILABILITY_ZONES
+AVAILABILITY_ZONES=(
+    # Host aggregate name: availability zone
+    ["asrock01"]="asrock01"
+    ["asrock02"]="asrock02"
+    ["asrock03"]="asrock03"
+)
+
+declare -A NODE_NAME_PREFIXES
+NODE_NAME_PREFIXES=(
+    # Host aggregate name: node name prefix
+    ["asrock01"]="as1-maas-node-"
+    ["asrock02"]="as2-maas-node-"
+    ["asrock03"]="as3-maas-node-"
+)
+
+for aggregate in "${!COMPUTE_NODES[@]}"; do
+    # Create the host aggregate in its availability zone
+    echo "Creating host aggregate ${aggregate}..."
+    openstack aggregate create \
+        --zone ${AVAILABILITY_ZONES[$aggregate]} \
+        ${aggregate}
+
+    # Add the aggregate's compute nodes to it
+    for node in ${COMPUTE_NODES[$aggregate]}; do
+        echo "Adding node ${NODE_NAME_PREFIXES[$aggregate]}${node} to host aggregate ${aggregate}..."
+        openstack aggregate add host ${aggregate} ${NODE_NAME_PREFIXES[$aggregate]}${node}
+    done
+
+    # Create a tiny per-AZ test flavor if it does not already exist
+    openstack flavor show ${AVAILABILITY_ZONES[$aggregate]}.m1.cirros || openstack flavor create \
+        --vcpus 1 --ram 64 --disk 1 \
+        --property aggregate_instance_extra_specs:host_aggregate=${AVAILABILITY_ZONES[$aggregate]} \
+        ${AVAILABILITY_ZONES[$aggregate]}.m1.cirros
+done
+
diff --git a/scripts/sriov/networking-sriov.service b/scripts/sriov/networking-sriov.service
new file mode 100644
index 0000000..e304e09
--- /dev/null
+++ b/scripts/sriov/networking-sriov.service
@@ -0,0 +1,18 @@
+[Unit]
+Description=Configure SR-IOV Virtual Functions
+DefaultDependencies=no
+Wants=network.target
+After=local-fs.target network-pre.target apparmor.service systemd-sysctl.service systemd-modules-load.service
+Before=network.target shutdown.target network-online.target
+Conflicts=shutdown.target
+
+[Install]
+WantedBy=multi-user.target
+WantedBy=network-online.target
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/bin/networking-sriov.sh systemd-start
+ExecStop=/usr/local/bin/networking-sriov.sh systemd-stop
+RemainAfterExit=true
+TimeoutStartSec=5min
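+
+# Installation sketch (the script path is taken from ExecStart above; the
+# unit location is an assumption): copy networking-sriov.sh to
+# /usr/local/bin/, copy this file to /etc/systemd/system/, then run:
+#   systemctl daemon-reload && systemctl enable --now networking-sriov.service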
diff --git a/scripts/sriov/networking-sriov.sh b/scripts/sriov/networking-sriov.sh
new file mode 100755
index 0000000..864c3a5
--- /dev/null
+++ b/scripts/sriov/networking-sriov.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+DESC="Configure SR-IOV Virtual Functions"
+
+. /lib/lsb/init-functions
+
+do_start() {
+    # The PF name (enp175s0f1) and VF count are host-specific; adjust as needed.
+    echo '12' > /sys/class/net/enp175s0f1/device/sriov_numvfs
+}
+
+
+do_stop() {
+    echo '0' > /sys/class/net/enp175s0f1/device/sriov_numvfs
+}
+
+
+case "$1" in
+    systemd-start)
+        do_start
+        ;;
+    systemd-stop)
+        do_stop
+        ;;
+    restart)
+        log_daemon_msg "Re-$DESC"
+        do_stop
+        do_start
+        ;;
+    *)
+        N=/usr/local/bin/networking-sriov.sh
+        echo "Usage: $N {restart|systemd-start|systemd-stop}" >&2
+        exit 1
+        ;;
+esac
+
+exit 0
diff --git a/secrets/.gitkeep b/secrets/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/secrets/vault.txt b/secrets/vault.txt
new file mode 100644
index 0000000..3e0a668
--- /dev/null
+++ b/secrets/vault.txt
@@ -0,0 +1,18 @@
+Unseal Key 1: L3OvWpS8dYyIl9mxJ/rn46cn5uVlf9FVZOfngf6K03b+
+Unseal Key 2: OYnjKwMDar1pAWB8XFuwq0x6TyTBRaT5BvcG6J1jNKDJ
+Unseal Key 3: aKvnqpX+6kWIJe1GWR8M/joJpDissExSk1oYC1vO5lmy
+Unseal Key 4: 76IAnSGfbnugZCDBgtoLMsAnhmErr6N9aJnuEAQrUP//
+Unseal Key 5: +VhM7LYgcUpB8pkM+Xtceit6L6CPldbRCokPeWfCtynI
+
+Initial Root Token: s.MC3kjNzrLhBuPk2DCrOzVrcw
+
+Vault initialized with 5 key shares and a key threshold of 3. Please securely
+distribute the key shares printed above. When the Vault is re-sealed,
+restarted, or stopped, you must supply at least 3 of these keys to unseal it
+before it can start servicing requests.
+
+Vault does not store the generated master key. Without at least 3 keys to
+reconstruct the master key, Vault will remain permanently sealed!
+
+It is possible to generate new unseal keys, provided you have a quorum of
+existing unseal key shares. See "vault operator rekey" for more information.