heat_template_version: 2013-05-23

description: >
  HOT template to deploy one compute node into an existing neutron tenant
  network and assign a floating IP address to the server so it is routable
  from the public network.

parameters:
  host_prefix:
    type: string
    description: The hostname will be prefixed by this string
  key_name:
    type: string
    description: Name of keypair to assign to servers
  image:
    type: string
    description: Name of image to use for servers
  flavor:
    type: string
    description: Flavor to use for servers
  public_net_id:
    type: string
    description: >
      ID of public network for which floating IP addresses will be allocated
  private_net_id:
    type: string
    description: ID of private network into which servers get deployed
  private_subnet_id:
    type: string
    description: ID of private sub network into which servers get deployed
  master_node_ip:
    type: string
    description: IP address of the Master node.

resources:
  node_wait_handle:
    type: "AWS::CloudFormation::WaitConditionHandle"

  node_wait_condition:
    type: "AWS::CloudFormation::WaitCondition"
    depends_on:
      - compute_node
    properties:
      Handle:
        get_resource: node_wait_handle
      Timeout: "1200"

  secgroup_all_open:
    type: "OS::Neutron::SecurityGroup"
    properties:
      rules:
        - protocol: icmp
        - protocol: tcp
        - protocol: udp

  compute_node:
    type: OS::Nova::Server
    properties:
      name:
        get_param: host_prefix
      image:
        get_param: image
      flavor:
        get_param: flavor
      key_name:
        get_param: key_name
      user_data_format: RAW
      user_data:
        str_replace:
          template: |
            #!/bin/sh
            # Replace NetworkManager with the legacy network service
            yum -y remove NetworkManager
            chkconfig network on
            # Add a yum repository providing the Torque packages
            cat > /etc/yum.repos.d/torque.repo << EOF
            [torque]
            name=torque
            baseurl=http://192.168.95.200/install/post/otherpkgs/el7/torque
            enabled=1
            gpgcheck=0
            EOF
            yum -y install torque-client
            chkconfig trqauthd on
            chkconfig pbs_mom on
            # Point pbs_mom at the Torque server running on the master node
            echo $MASTER_NODE_IP > /var/spool/torque/server_name
            cat > /var/spool/torque/mom_priv/config << EOF
            \$logevent 0x1ff
            \$pbsserver $MASTER_NODE_IP
            EOF
            # Derive the hostname from the last octet of the private IP
            myip=$(ip addr show eth0 | awk '$1 == "inet" {print $2}' | cut -f1 -d/)
            myip_last_octet=${myip##*.}
            hostname $HOST_PREFIX${myip_last_octet}
            echo $myip $HOST_PREFIX${myip_last_octet} >> /etc/hosts
            service trqauthd restart
            service pbs_mom restart
            # Create a test group and user
            groupadd -g 4001 testuser
            useradd -g 4001 -u 4001 -m testuser
            # Notify the Heat wait condition that setup completed
            cat > /tmp/wait-data << EOF
            { "Status" : "SUCCESS", "Reason" : "Setup Complete", "UniqueId" : "None", "Data" : "OK" }
            EOF
            curl -T /tmp/wait-data '$WAIT_HANDLE'
          params:
            "$MASTER_NODE_IP":
              get_param: master_node_ip
            "$WAIT_HANDLE":
              get_resource: node_wait_handle
            "$HOST_PREFIX":
              get_param: host_prefix
      networks:
        - port:
            get_resource: compute_node_eth0

  compute_node_eth0:
    type: OS::Neutron::Port
    properties:
      network_id:
        get_param: private_net_id
      fixed_ips:
        - subnet_id:
            get_param: private_subnet_id
      security_groups:
        - get_resource: secgroup_all_open

  compute_floating_ip:
    type: OS::Neutron::FloatingIP
    properties:
      floating_network_id:
        get_param: public_net_id
      port_id:
        get_resource: compute_node_eth0

outputs:
  compute_node_name:
    description: The name of the compute node
    value: { get_attr: [ compute_node, instance_name ] }
  compute_node_ip:
    description: IP address of compute node in private network
    value: { get_attr: [ compute_node_eth0, fixed_ips, 0, ip_address ] }
  compute_node_external_ip:
    description: Floating IP address of compute node in public network
    value: { get_attr: [ compute_floating_ip, floating_ip_address ] }
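
# Launch sketch (illustrative only, not part of the original template): the
# file name "compute-node.yaml" and every parameter value below are
# placeholders; substitute the UUIDs and names from your own deployment. With
# the standard OpenStack client this could look like:
#
#   openstack stack create -t compute-node.yaml \
#     --parameter host_prefix=compute- \
#     --parameter key_name=<keypair-name> \
#     --parameter image=<image-name> \
#     --parameter flavor=<flavor-name> \
#     --parameter public_net_id=<public-net-uuid> \
#     --parameter private_net_id=<private-net-uuid> \
#     --parameter private_subnet_id=<private-subnet-uuid> \
#     --parameter master_node_ip=<master-node-ip> \
#     torque-compute-node
#
# The stack reports success only after the instance's user_data script posts
# to the wait condition handle, or fails once the 1200-second timeout expires.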