From 6d347f66dd6e58320792bf0355e47314147c5e9b Mon Sep 17 00:00:00 2001 From: penguhyang Date: Fri, 18 Sep 2015 02:24:03 -0400 Subject: [PATCH 01/19] correct code to return right value --- xCAT-server/sbin/runcmdinstaller | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xCAT-server/sbin/runcmdinstaller b/xCAT-server/sbin/runcmdinstaller index 3b51020de..d194d3c00 100755 --- a/xCAT-server/sbin/runcmdinstaller +++ b/xCAT-server/sbin/runcmdinstaller @@ -10,7 +10,7 @@ action = "sh" if( node=="-h" || argc !=2 || ! node || ! cmd){ print "Usage:\n\n runcmdinstaller \"\"\n" print " make sure all the commands are quoted by \"\"\n"; - exit 1; + exit 0; } ns = "/inet/tcp/0/" node "/" port From cc4442402d44df79646d6d3996c897e007b2b388 Mon Sep 17 00:00:00 2001 From: huweihua Date: Tue, 1 Sep 2015 22:04:00 -0400 Subject: [PATCH 02/19] add content --- .../common/deployment/cfg_partition.rst | 515 +++++++++++++++++- .../common/deployment/cfg_second_adapter.rst | 128 +++++ .../common/deployment/create_img.rst | 45 +- .../common/deployment/deploy_os.rst | 31 +- .../common/deployment/driver_update_disk.rst | 125 ++++- .../common/deployment/raid_cfg.rst | 442 +++++++++++++++ 6 files changed, 1278 insertions(+), 8 deletions(-) diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_partition.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_partition.rst index ba7f5ba82..84b2f5c10 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_partition.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_partition.rst @@ -1,2 +1,513 @@ -Configure Disk Partition -======================== +.. BEGIN_Overview +By default, xCAT will install the operating system on the first disk and with default partitions layout in the node. However, you may choose to customize the disk partitioning during the install process and define a specific disk layout. You can do this in one of two ways: '**partition definition file**' or '**partition definition script**'. + +**Notes** + +- 'Partition definition file' way can be used for RedHat, SLES and Ubuntu. +- 'partition definition script' way was tested only for RedHat and Ubuntu, use this feature on SLES at your own risk. +- Cause disk configuration for Ubuntu is different from RedHat, so there are some specific section for ubuntu. +.. END_Overview + +.. BEGIN_partition_definition_file_Overview +You could create a customized osimage partition file, say /install/custom/my-partitions, that contains the disk partitioning definition, then associate the partition file with osimage, the nodeset command will insert the contents of this file directly into the generated autoinst configuration file that will be used by the OS installer. +.. END_partition_definition_file_Overview + +.. BEGIN_partition_definition_file_content + +The partition file must follow the partitioning syntax of the installer(e.g. kickstart for RedHat, AutoYaST for SLES, Preseed for Ubuntu). you could refer to the `Kickstart documentation `_ or `Autoyast documentation `_ or `Preseed documentation `_ write your own partitions layout. Meanwhile, RedHat and SuSE provides some tools that could help generate kickstart/autoyast templates, in which you could refer to the partition section for the partitions layout information: + +#. RedHat: + +- The file /root/anaconda-ks.cfg is a sample kickstart file created by RedHat installer during the installation process based on the options that you selected. 
+- system-config-kickstart is a tool with graphical interface for creating kickstart files + +#. SLES + +- Use yast2 autoyast in GUI or CLI mode to customize the installation options and create autoyast file +- Use yast2 clone_system to create autoyast configuration file /root/autoinst.xml to clone an existing system + +#. Ubuntu + +- For detailed information see the files partman-auto-recipe.txt and partman-auto-raid-recipe.txt included in the debian-installer package. Both files are also available from the debian-installer source repository. Note that the supported functionality may change between releases. + +.. END_partition_definition_file_content + +.. BEGIN_partition_definition_file_example_RedHat_Standard_Partitions_for_IBM_Power_machines +Here is partition definition file example for RedHat standard partition in IBM Power machines +:: + # Uncomment this PReP line for IBM Power servers + #part None --fstype "PPC PReP Boot" --size 8 --ondisk sda + # Uncomment this efi line for x86_64 servers + #part /boot/efi --size 50 --ondisk /dev/sda --fstype efi + part /boot --size 256 --fstype ext4 + part swap --recommended --ondisk sda + part / --size 1 --grow --fstype ext4 --ondisk sda + +.. END_partition_definition_file_example_RedHat_Standard_Partitions_for_IBM_Power_machines + +.. BEGIN_partition_definition_file_example_RedHat_LVM_for_IBM_Power_machines +Here is partition definition file example for RedHat LVM partition in IBM Power machines +:: + # Uncomment this PReP line for IBM Power servers + #part None --fstype "PPC PReP Boot" --ondisk /dev/sda --size 8 + # Uncomment this efi line for x86_64 servers + #part /boot/efi --size 50 --ondisk /dev/sda --fstype efi + part /boot --size 256 --fstype ext4 --ondisk /dev/sda + part swap --recommended --ondisk /dev/sda + part pv.01 --size 1 --grow --ondisk /dev/sda + volgroup system pv.01 + logvol / --vgname=system --name=root --size 1 --grow --fstype ext4 + +.. END_partition_definition_file_example_RedHat_LVM_for_IBM_Power_machines + +.. BEGIN_partition_definition_file_example_RedHat_RAID1_for_IBM_Power_machines +Partition definition file example for RedHat RAID1 please refer to `Configure RAID before Deploy OS `_ +.. END_partition_definition_file_example_RedHat_RAID1_for_IBM_Power_machines + +.. BEGIN_partition_definition_file_example_SLES_Standard_Partitions_for_X86_64 +Here is partition definition file example for SLES standard partition in X86_64 machines +:: + + /dev/sda + true + all + + + true + swap + true + swap + path + 1 + primary + 32G + + + true + ext3 + true + / + path + 2 + primary + 64G + + + + +.. END_partition_definition_file_example_SLES_Standard_Partitions_for_X86_64 + +.. BEGIN_partition_definition_file_example_SLES_LVM_for_X86_64 +Here is partition definition file example for SLES LVM partition in X86_64 machines +:: + + /dev/sda + true + + + true + false + ext3 + true + false + device + 65 + 1 + false + + false + 8M + 1 + 4 + + + + true + false + ext3 + true + false + /boot + device + 131 + 2 + false + + false + 256M + 1 + 4 + + + + true + false + false + false + vg0 + device + 142 + 3 + false + + false + max + 1 + 4 + + + + + CT_DISK + all + + + /dev/vg0 + true + + + true + false + swap + true + false + swap + swap + device + 130 + 5 + false + + false + auto + 1 + 4 + + + + true + false + ext3 + true + false + root + / + device + 131 + 1 + false + + false + max + 1 + 4 + + + + + CT_LVM + all + + +.. END_partition_definition_file_example_SLES_LVM_for_X86_64 + +.. 
BEGIN_partition_definition_file_example_SLES_Standard_partition_for_ppc64 +Here is partition definition file example for SLES standard partition in ppc64 machines +:: + + /dev/sda + true + + + true + false + ext3 + false + false + device + 65 + 1 + false + auto + + + true + false + swap + true + defaults + false + swap + id + 130 + 2 + false + auto + + + true + false + ext3 + true + acl,user_xattr + false + / + id + 131 + 3 + false + max + + + + CT_DISK + all + + +.. END_partition_definition_file_example_SLES_Standard_partition_for_ppc64 + +.. BEGIN_partition_definition_file_example_SLES_RAID1 +Partition definition file example for SLES RAID1 please refer to `Configure RAID before Deploy OS `_ +.. END_partition_definition_file_example_SLES_RAID1 + +.. BEGIN_partition_definition_file_example_Ubuntu_Standard_partition_for_PPC64le +Here is partition definition file example for Ubuntu standard partition in ppc64le machines +:: + 8 1 32 prep + $primary{ } + $bootable{ } + method{ prep } . + + 256 256 512 ext3 + $primary{ } + method{ format } + format{ } + use_filesystem{ } + filesystem{ ext3 } + mountpoint{ /boot } . + + 64 512 300% linux-swap + method{ swap } + format{ } . + + 512 1024 4096 ext3 + $primary{ } + method{ format } + format{ } + use_filesystem{ } + filesystem{ ext4 } + mountpoint{ / } . + + 100 10000 1000000000 ext3 + method{ format } + format{ } + use_filesystem{ } + filesystem{ ext4 } + mountpoint{ /home } . + +.. END_partition_definition_file_example_Ubuntu_Standard_partition_for_PPC64le + +.. BEGIN_partition_definition_file_example_Ubuntu_Standard_partition_for_x86_64 +Here is partition definition file example for Ubuntu standard partition in x86_64 machines +:: + 256 256 512 vfat + $primary{ } + method{ format } + format{ } + use_filesystem{ } + filesystem{ vfat } + mountpoint{ /boot/efi } . + + 256 256 512 ext3 + $primary{ } + method{ format } + format{ } + use_filesystem{ } + filesystem{ ext3 } + mountpoint{ /boot } . + + 64 512 300% linux-swap + method{ swap } + format{ } . + + 512 1024 4096 ext3 + $primary{ } + method{ format } + format{ } + use_filesystem{ } + filesystem{ ext4 } + mountpoint{ / } . + + 100 10000 1000000000 ext3 + method{ format } + format{ } + use_filesystem{ } + filesystem{ ext4 } + mountpoint{ /home } . + +.. END_partition_definition_file_example_Ubuntu_Standard_partition_for_x86_64 + +.. BEGIN_partition_definition_file_Associate_partition_file_with_osimage_common +Run below commands to associate the partition with the osimage +:: + chdef -t osimage partitionfile=/install/custom/my-partitions + nodeset osimage= + +- For Redhat, when nodeset runs and generates the /install/autoinst file for a node, it will replace the #XCAT_PARTITION_START#...#XCAT_PARTITION_END# directives from your osimage template with the contents of your custom partitionfile. + +- For Ubuntu, when nodeset runs and generates the /install/autoinst file for a node, it will generate a script to write the partition configuration to /tmp/partitionfile, this script will replace the #XCA_PARTMAN_RECIPE_SCRIPT# directive in /install/autoinst/.pre. + +.. END_partition_definition_file_Associate_partition_file_with_osimage_common + + +.. BEGIN_Partition_Definition_Script_overview +Create a shell script that will be run on the node during the install process to dynamically create the disk partitioning definition. 
This script will be run during the OS installer %pre script on Redhat or preseed/early_command on Unbuntu execution and must write the correct partitioning definition into the file /tmp/partitionfile on the node +.. END_Partition_Definition_Script_overview + +.. BEGIN_Partition_Definition_Script_Create_partition_script_content +The purpose of the partition script is to create the /tmp/partionfile that will be inserted into the kickstart/autoyast/preseed template, the script could include complex logic like select which disk to install and even configure RAID, etc + +**Note**: the partition script feature is not thoroughly tested on SLES, there might be problems, use this feature on SLES at your own risk. +.. END_Partition_Definition_Script_Create_partition_script_content + +.. BEGIN_Partition_Definition_Script_Create_partition_script_example_redhat_sles +Here is an example of the partition script on Redhat and SLES, the partitioning script is /install/custom/my-partitions.sh: +:: + instdisk="/dev/sda" + + modprobe ext4 >& /dev/null + modprobe ext4dev >& /dev/null + if grep ext4dev /proc/filesystems > /dev/null; then + FSTYPE=ext3 + elif grep ext4 /proc/filesystems > /dev/null; then + FSTYPE=ext4 + else + FSTYPE=ext3 + fi + BOOTFSTYPE=ext3 + EFIFSTYPE=vfat + if uname -r|grep ^3.*el7 > /dev/null; then + FSTYPE=xfs + BOOTFSTYPE=xfs + EFIFSTYPE=efi + fi + + if [ `uname -m` = "ppc64" ]; then + echo 'part None --fstype "PPC PReP Boot" --ondisk '$instdisk' --size 8' >> /tmp/partitionfile + fi + if [ -d /sys/firmware/efi ]; then + echo 'bootloader --driveorder='$instdisk >> /tmp/partitionfile + echo 'part /boot/efi --size 50 --ondisk '$instdisk' --fstype $EFIFSTYPE' >> /tmp/partitionfile + else + echo 'bootloader' >> /tmp/partitionfile + fi + + echo "part /boot --size 512 --fstype $BOOTFSTYPE --ondisk $instdisk" >> /tmp/partitionfile + echo "part swap --recommended --ondisk $instdisk" >> /tmp/partitionfile + echo "part / --size 1 --grow --ondisk $instdisk --fstype $FSTYPE" >> /tmp/partitionfile + +.. END_Partition_Definition_Script_Create_partition_script_example_redhat_sles + +.. BEGIN_Partition_Definition_Script_Create_partition_script_example_ubuntu +The following is an example of the partition script on Ubuntu, the partitioning script is /install/custom/my-partitions.sh: +:: + if [ -d /sys/firmware/efi ]; then + echo "ubuntu-efi ::" > /tmp/partitionfile + echo " 512 512 1024 fat16" >> /tmp/partitionfile + echo ' $iflabel{ gpt } $reusemethod{ } method{ efi } format{ }' >> /tmp/partitionfile + echo " ." >> /tmp/partitionfile + else + echo "ubuntu-boot ::" > /tmp/partitionfile + echo "100 50 100 ext3" >> /tmp/partitionfile + echo ' $primary{ } $bootable{ } method{ format } format{ } use_filesystem{ } filesystem{ ext3 } mountpoint{ /boot }' >> /tmp/partitionfile + echo " ." >> /tmp/partitionfile + fi + echo "500 10000 1000000000 ext3" >> /tmp/partitionfile + echo " method{ format } format{ } use_filesystem{ } filesystem{ ext3 } mountpoint{ / }" >> /tmp/partitionfile + echo " ." >> /tmp/partitionfile + echo "2048 512 300% linux-swap" >> /tmp/partitionfile + echo " method{ swap } format{ }" >> /tmp/partitionfile + echo " ." >> /tmp/partitionfile + +.. END_Partition_Definition_Script_Create_partition_script_example_ubuntu + +.. 
BEGIN_Partition_Definition_Script_Associate_partition_script_with_osimage_common +Run below commands to associate partition script with osimage: +:: + chdef -t osimage partitionfile='s:/install/custom/my-partitions.sh' + nodeset osimage= + +- The "s:" preceding the filename tells nodeset that this is a script. +- For Redhat, when nodeset runs and generates the /install/autoinst file for a node, it will add the execution of the contents of this script to the %pre section of that file. The nodeset command will then replace the #XCAT_PARTITION_START#...#XCAT_PARTITION_END# directives from the osimage template file with "%include /tmp/partitionfile" to dynamically include the tmp definition file your script created. +- For Ubuntu, when nodeset runs and generates the /install/autoinst file for a node, it will replace the "#XCA_PARTMAN_RECIPE_SCRIPT#" directive and add the execution of the contents of this script to the /install/autoinst/.pre, the /install/autoinst/.pre script will be run in the preseed/early_command. +.. END_Partition_Definition_Script_Associate_partition_script_with_osimage_common + +.. BEGIN_Partition_Disk_File_ubuntu_only +The disk file contains the name of the disks to partition in traditional, non-devfs format and delimited with space " ", for example, +:: + /dev/sda /dev/sdb + +If not specified, the default value will be used. + +**Associate partition disk file with osimage** +:: + chdef -t osimage -p partitionfile='d:/install/custom/partitiondisk' + nodeset osimage= + +- the 'd:' preceding the filename tells nodeset that this is a partition disk file. +- For Ubuntu, when nodeset runs and generates the /install/autoinst file for a node, it will generate a script to write the content of the partition disk file to /tmp/boot_disk, this context to run the script will replace the #XCA_PARTMAN_DISK_SCRIPT# directive in /install/autoinst/.pre. +.. END_Partition_Disk_File_ubuntu_only + +.. BEGIN_Partition_Disk_Script_ubuntu_only +The disk script contains a script to generate a partitioning disk file named "/tmp/boot_disk". for example, +:: + rm /tmp/devs-with-boot 2>/dev/null || true; + for d in $(list-devices partition); do + mkdir -p /tmp/mymount; + rc=0; + mount $d /tmp/mymount || rc=$?; + if [[ $rc -eq 0 ]]; then + [[ -d /tmp/mymount/boot ]] && echo $d >>/tmp/devs-with-boot; + umount /tmp/mymount; + fi + done; + if [[ -e /tmp/devs-with-boot ]]; then + head -n1 /tmp/devs-with-boot | egrep -o '\S+[^0-9]' > /tmp/boot_disk; + rm /tmp/devs-with-boot 2>/dev/null || true; + else + DEV=`ls /dev/disk/by-path/* -l | egrep -o '/dev.*[s|h|v]d[^0-9]$' | sort -t : -k 1 -k 2 -k 3 -k 4 -k 5 -k 6 -k 7 -k 8 -g | head -n1 | egrep -o '[s|h|v]d.*$'`; + if [[ "$DEV" == "" ]]; then DEV="sda"; fi; + echo "/dev/$DEV" > /tmp/boot_disk; + fi; + +If not specified, the default value will be used. + +**Associate partition disk script with osimage** +:: + chdef -t osimage -p partitionfile='s:d:/install/custom/partitiondiskscript' + nodeset osimage= + +- the 's:' prefix tells nodeset that is a script, the 's:d:' preceding the filename tells nodeset that this is a script to generate the partition disk file. +- For Ubuntu, when nodeset runs and generates the /install/autoinst file for a node, this context to run the script will replace the #XCA_PARTMAN_DISK_SCRIPT# directive in /install/autoinst/.pre. +.. END_Partition_Disk_Script_ubuntu_only + + +.. 
BEGIN_Additional_preseed_configuration_file_ubuntu_only +To support other specific partition methods such as RAID or LVM in Ubuntu, some additional preseed configuration entries should be specified. +If using file way, 'c:', the additional preseed config file contains the additional preseed entries in "d-i ..." syntax. When "nodeset", the #XCA_PARTMAN_ADDITIONAL_CFG# directive in /install/autoinst/ will be replaced with content of the config file, an example: +:: + d-i partman-auto/method string raid + d-i partman-md/confirm boolean true + +If not specified, the default value will be used. +.. END_Additional_preseed_configuration_file_ubuntu_only + +.. BEGIN_Additional_preseed_configuration_script_ubuntu_only +To support other specific partition methods such as RAID or LVM in Ubuntu, some additional preseed configuration entries should be specified. +If using script way, 's:c:', the additional preseed config script is a script to set the preseed values with "debconf-set". When "nodeset", the #XCA_PARTMAN_ADDITIONAL_CONFIG_SCRIPT# directive in /install/autoinst/.pre will be replaced with the content of the script, an example: +:: + debconf-set partman-auto/method string raid + debconf-set partman-md/confirm boolean true + +If not specified, the default value will be used. +.. END_Additional_preseed_configuration_script_ubuntu_only diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_second_adapter.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_second_adapter.rst index 3d373b210..848999675 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_second_adapter.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_second_adapter.rst @@ -1,2 +1,130 @@ Configure Secondary Network Adapter =================================== + +Introduction +------------ +The **nics** table and the **confignics** postscript can be used to automatically configure additional **ethernet** and **Infiniband** adapters on nodes as they are being deployed. ("Additional adapters" means adapters other than the primary adapter that the node is being installed/booted over.) + +The way the confignics postscript decides what IP address to give the secondary adapter is by checking the nics table, in which the nic configuration information is stored. + +To use the nics table and confignics postscript to define a secondary adapter on one or more nodes, follow these steps: + + +Define configuration information for the Secondary Adapters in the nics table +----------------------------------------------------------------------------- + +There are 3 ways to complete this operation. + +**First way is use command line input. below is a example** +:: + [root@ls21n01 ~]# mkdef cn1 groups=all nicips.eth1="11.1.89.7|12.1.89.7" nicnetworks.eth1="net11|net12" nictypes.eth1="Ethernet" + 1 object definitions have been created or modified. + + [root@ls21n01 ~]# chdef cn1 nicips.eth2="13.1.89.7|14.1.89.7" nicnetworks.eth2="net13|net14" nictypes.eth2="Ethernet" + 1 object definitions have been created or modified. + +**Second way is using stanza file** + +prepare your stanza file .stanza. 
The content of the stanza file should look like this:
::
    #
    cn1:
        objtype=node
        arch=x86_64
        groups=kvm,vm,all
        nichostnamesuffixes.eth1=-eth1-1|-eth1-2
        nichostnamesuffixes.eth2=-eth2-1|-eth2-2
        nicips.eth1=11.1.89.7|12.1.89.7
        nicips.eth2=13.1.89.7|14.1.89.7
        nicnetworks.eth1=net11|net12
        nicnetworks.eth2=net13|net14
        nictypes.eth1=Ethernet
        nictypes.eth2=Ethernet

Define the configuration information from the stanza file:
::
    cat .stanza | mkdef -z

**Third way is to use 'tabedit' to edit the nics table directly**

The 'tabedit' command opens the specified table in the user's editor (such as vi), allows the user to edit any text, and then writes the changes back to the database table. This way is tedious and error prone, so it is not recommended. If you do use it, note that the **nicips**, **nictypes** and **nicnetworks** attributes are required.

Here is a sample nics table content:
::
    [root@ls21n01 ~]# tabdump nics
    #node,nicips,nichostnamesuffixes,nictypes,niccustomscripts,nicnetworks,nicaliases,comments,disable
    "cn1","eth1!11.1.89.7|12.1.89.7,eth2!13.1.89.7|14.1.89.7","eth1!-eth1-1|-eth1-2,eth2!-eth2-1|-eth2-2","eth1!Ethernet,eth2!Ethernet",,"eth1!net11|net12,eth2!net13|net14",,,

After you have defined the configuration information in any of the ways above, run the following command to put the configuration information into /etc/hosts:
::
    makehosts cn1

Then /etc/hosts will look like:
::
    11.1.89.7 cn1-eth1-1 cn1-eth1-1.ppd.pok.ibm.com
    12.1.89.7 cn1-eth1-2 cn1-eth1-2.ppd.pok.ibm.com
    13.1.89.7 cn1-eth2-1 cn1-eth2-1.ppd.pok.ibm.com
    14.1.89.7 cn1-eth2-2 cn1-eth2-2.ppd.pok.ibm.com

Add confignics into the node's postscripts list
-----------------------------------------------

Use the following command to add confignics into the node's postscripts list:
::
    chdef cn1 -p postscripts=confignics

By default, confignics does not configure the install nic. If needed, use the "-s" flag to allow the install nic to be configured:
::
    chdef cn1 -p postscripts="confignics -s"

Option "-s" writes the install nic's information into the configuration file for persistence. All of the install nic's data defined in the nics table will also be written.


Add network object into the networks table
------------------------------------------

The nicnetworks attribute only defines the name of the network object used by the IP address. Other information about the network should be defined in the networks table. You can use tabedit to add or modify the network objects:
::
    #netname,net,mask,mgtifname,gateway,dhcpserver,tftpserver,nameservers,ntpservers,logservers,dynamicrange,staticrange,staticrangeincrement,nodehostname,ddnsdomain,vlanid,domain,comments,disable
    ...
    "net11", "11.1.89.0", "255.255.255.0", "eth1",,,,,,,,,,,,,,,
    "net12", "12.1.89.0", "255.255.255.0", "eth1",,,,,,,,,,,,,,,
    "net13", "13.1.89.0", "255.255.255.0", "eth2",,,,,,,,,,,,,,,
    "net14", "14.1.89.0", "255.255.255.0", "eth2",,,,,,,,,,,,,,,

Option -r to remove the undefined NICS
---------------------------------------
If the compute node's nics were configured by confignics and the nics configuration was later changed in the nics table, you can use "confignics -r" to remove the undefined nics. For example, eth0, eth1 and eth2 are configured on the compute node:
::
    # ifconfig
    eth0 Link encap:Ethernet HWaddr 00:14:5e:d9:6c:e6
    ...
    eth1 Link encap:Ethernet HWaddr 00:14:5e:d9:6c:e7
    ...
    eth2 Link encap:Ethernet HWaddr 00:14:5e:d9:6c:e8
    ...

Delete the eth2 definition in the nics table with the chdef command.
Run +:: + updatenode -P "confignics -r" to remove the undefined eth2 on the compute node. + +The complete result is: +:: + # ifconfig + eth0 Link encap:Ethernet HWaddr 00:14:5e:d9:6c:e6 + ... + eth1 Link encap:Ethernet HWaddr 00:14:5e:d9:6c:e7 + ... + +Deleting the install nic will import some strange problems. So confignics -r can not delete the install nic. + + + + + + + + + + + diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst index bafa70f7a..ddb06de57 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst @@ -1,4 +1,47 @@ Select or Create an osimage Definition ====================================== -Run copycds to create image + +Before creating image by xCAT, distro media should be prepared ahead. That can be ISOs or DVDs. + +XCAT use 'copycds' command to create image which will be available to install nodes. 'copycds' command copies the contents of distro from media to /install// on management node. + +If using an ISO, copy it to (or NFS mount it on) the management node, and then run: +:: + copycds /.iso + +If using a DVD, put it in the DVD drive of the management node and run: +:: + copycds /dev/ + +The 'copycds' command automatically creates several osimage defintions in the database that can be used for node deployment. +To see the list of osimages, run +:: + lsdef -t osimage + +To see the attributes of a particular osimage, run +:: + lsdef -t osimage + +Initially, some attributes of osimage is assigned to default value by xCAT, they all can work correctly, cause the files or templates invoked by those attributes are shipped with xCAT by default. If need to customize those attribute, refer to next section "Customize osimage". + + +**[Below tips maybe helpful for you]** + +**[Tips 1]** +If this is the same distro version as what your management node used, create a .repo file in /etc/yum.repos.d with content similar to: +:: + [local--] + name=xCAT local + baseurl=file:/install// + enabled=1 + gpgcheck=0 + +In this way, if you need install some additional RPMs into your MN later, you can simply install them by yum. Or if you are installing a software on your MN that depends some RPMs from the this disto, those RPMs will be found and installed automatically. + +**[Tips 2]** +If need to change osimage name to your favorite name, below statement maybe help: +:: + lsdef -t osimage -z rhels6.2-x86_64-install-compute | sed 's/^[^ ]\+:/mycomputeimage:/' | mkdef -z + + diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst index 4ea116030..fdf9b83d8 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst @@ -1,10 +1,35 @@ Initialize the Compute for Deployment ===================================== -nodeset + +XCAT use '**nodeset**' command to associate a specific image to a node which will be installed with this image. +:: + nodeset osimage= + +There are more attributes of nodeset used for some specific purpose or specific machines, for example: + +* **runimage**: If you would like to run a task after deployment, you can define that task with this attribute. 
* **runcmd**: This instructs the node to boot to the xCAT nbfs environment and proceed to configure the BMC for basic remote access. This causes the IP, netmask, gateway, username, and password to be programmed according to the configuration table.
* **shell**: This instructs the node to boot to the xCAT genesis environment and present a shell prompt on the console. The node can also be logged into with ssh and has utilities available such as wget, tftp, scp, nfs, and cifs. It has storage drivers available for many common systems.

Choose the additional nodeset attributes according to your requirements. For more information about nodeset, refer to its man page.

Start the OS Deployment
=======================

Starting the deployment involves two key operations: setting the node to boot from the network, and rebooting the node.

For Power machines, these two operations can be completed by the single command '**rnetboot**':
::
    rnetboot <noderange>

For x86_64 servers, these two operations require two separate commands.
Set the server to boot from the network:
::
    rsetboot <noderange> net

Then reboot the server:
::
    rpower <noderange> reset


diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/driver_update_disk.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/driver_update_disk.rst
index 9f3a2d240..87b3a190b 100644
--- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/driver_update_disk.rst
+++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/driver_update_disk.rst
@@ -1,2 +1,123 @@
-Using Driver Update Disk
-========================
+.. BEGIN_Overview
+
During the installation or netboot of a node, the drivers in the initrd are used to drive devices such as network cards and IO devices to perform the installation/netbooting tasks. Sometimes the drivers for new devices are not included in the default initrd shipped by Red Hat or SUSE. The solution is to inject the new drivers into the initrd so they can drive the new devices during the installation/netbooting process.

Generally there are two approaches to injecting the new drivers: **Driver Update Disk** and **Driver RPM Package**.

A "**Driver Update Disk**" is media which contains the drivers, firmware and related configuration files for certain devices. The driver update disk is always supplied by the vendor of the device. One driver update disk can contain multiple drivers for different OS releases and different hardware architectures. Red Hat and SUSE have different driver update disk formats.

A '**Driver RPM Package**' is an rpm package which includes the drivers and firmware for specific devices. It is shipped by the vendor of the device for a new device or a new kernel version.

xCAT supports both, but the '**Driver RPM Package**' approach is only supported in xCAT 2.8 and later.

No matter which approach is chosen, there are two steps to make the new drivers work: one is to locate the new drivers, the other is to inject the new drivers into the initrd.

.. END_Overview


.. BEGIN_locate_driver_for_DUD
There are two approaches for xCAT to find the driver disk (pick one):

- Specify the location of the driver disk in the osimage object (*This is ONLY supported in 2.8 and later*)

The value for the 'driverupdatesrc' attribute is a comma-separated driver disk list.
The tag 'dud' must be specified before the full path of 'driver update disk' to specify the type of the file: +:: + chdef -t osimage driverupdatesrc=dud: + +- Put the driver update disk in the directory /driverdisk// (*e.g. /install/driverdisk/sles11.1/x86_64*). During the running of the 'genimage', 'geninitrd' or 'nodeset' command, xCAT will look for driver update disks in the directory /driverdisk//. + +.. END_locate_driver_for_DUD + +.. BEGIN_locate_driver_for_RPM +The Driver RPM packages must be specified in the osimage object. + +Three attributes of osimage object can be used to specify the Driver RPM location and Driver names. If you want to load new drivers in the initrd, the '**netdrivers**' attribute must be set. And one or both of the '**driverupdatesrc**' and '**osupdatename**' attributes must be set. If both of 'driverupdatesrc' and 'osupdatename' are set, the drivers in the 'driverupdatesrc' have higher priority. + +- netdrivers - comma separated driver names that need to be injected into the initrd. The postfix '.ko' can be ignored. + +The 'netdrivers' attribute must be set to specify the new driver list. If you want to load all the drivers from the driver rpms, use the keyword allupdate. Another keyword for the netdrivers attribute is updateonly, which means only the drivers located in the original initrd will be added to the newly built initrd from the driver rpms. This is useful to reduce the size of the new built initrd when the distro is updated, since there are many more drivers in the new kernel rpm than in the original initrd. Examples: +:: + chdef -t osimage netdrivers=megaraid_sas.ko,igb.ko + chdef -t osimage netdrivers=allupdate + chdef -t osimage netdrivers=updateonly,igb.ko,new.ko + +- driverupdatesrc - comma separated driver rpm packages (full path should be specified) + +A tag named 'rpm' can be specified before the full path of the rpm to specify the file type. The tag is optional since the default format is 'rpm' if no tag is specified. Example: +:: + chdef -t osimage driverupdatesrc=rpm:,rpm: + +- osupdatename - comma separated 'osdistroupdate' objects. Each 'osdistroupdate' object specifies a Linux distro update. + +When geninitrd is run, 'kernel-*.rpm' will be searched in the osdistroupdate.dirpath to get all the rpm packages and then those rpms will be searched for drivers. Example: +:: + mkdef -t osdistroupdate update1 dirpath=/install// + chdef -t osimage osupdatename=update1 + +If 'osupdatename' is specified, the kernel shipped with the 'osupdatename' will be used to load the newly built initrd, then only the drivers matching the new kernel will be kept in the newly built initrd. If trying to use the 'osupdatename', the 'allupdate' or 'updateonly' should be added in the 'netdrivers' attribute, or all the necessary driver names for the new kernel need to be added in the 'netdrivers' attribute. Otherwise the new drivers for the new kernel will be missed in newly built initrd. +.. END_locate_driver_for_RPM + + +.. BEGIN_inject_into_initrd__for_diskfull_for_DUD +- If specifying the driver disk location in the osimage, there are two ways to inject drivers: + +The first way is: +:: + nodeset osimage= + +The Second way is: +:: + geninitrd + nodeset osimage= --noupdateinitrd + +**Note:** 'geninitrd' + 'nodeset --noupdateinitrd' is useful when you need to run nodeset frequently for a diskful node. 'geninitrd' only needs be run once to rebuild the initrd and 'nodeset --noupdateinitrd' will not touch the initrd and kernel in /tftpboot/xcat/osimage//. 
+ +- If putting the driver disk in /driverdisk//: + +Running 'nodeset ' in anyway will load the driver disk + +.. END_inject_into_initrd__for_diskfull_for_DUD + +.. BEGIN__inject_into_initrd__for_diskfull_for_RPM + +There are two ways to inject drivers, one is: +:: + nodeset osimage= [--ignorekernelchk] + +Another is: +:: + geninitrd [--ignorekernelchk] + nodeset osimage= --noupdateinitrd + +**Note:** 'geninitrd' + 'nodeset --noupdateinitrd' is useful when you need to run nodeset frequently for diskful nodes. 'geninitrd' only needs to be run once to rebuild the initrd and 'nodeset --noupdateinitrd' will not touch the initrd and kernel in /tftpboot/xcat/osimage//. + +The option '--ignorekernelchk' is used to skip the kernel version checking when injecting drivers from osimage.driverupdatesrc. To use this flag, you should make sure the drivers in the driver rpms are usable for the target kernel. +.. END_inject_into_initrd__for_diskfull_for_RPM + +.. BEGIN_inject_into_initrd__for_diskless_for_DUD +- If specifying the driver disk location in the osimage + +Run the below command: +:: + genimage + +- If putting the driver disk in /driverdisk//: + +Running 'genimage' in anyway will load the driver disk +.. END_inject_into_initrd__for_diskless_for_DUD + +.. BEGIN_inject_into_initrd__for_diskless_for_RPM +Run the below command: +:: + genimage [--ignorekernelchk] + +The option '--ignorekernelchk' is used to skip the kernel version checking when injecting drivers from osimage.driverupdatesrc. To use this flag, you should make sure the drivers in the driver rpms are usable for the target kernel. +.. END_inject_into_initrd__for_diskless_for_RPM + +.. BEGIN_node +- If the drivers from the driver disk or driver rpm are not already part of the installed or booted system, it's necessary to add the rpm packages for the drivers to the .pkglist or .otherpkglist of the osimage object to install them in the system. + +- If a driver rpm needs to be loaded, the osimage object must be used for the 'nodeset' and 'genimage' command, instead of the older style profile approach. + +- Both a Driver disk and a Driver rpm can be loaded in one 'nodeset' or 'genimage' invocation. +.. END_node diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/raid_cfg.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/raid_cfg.rst index 2a08772e1..cf3ad7af7 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/raid_cfg.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/raid_cfg.rst @@ -1,2 +1,444 @@ Configure RAID before Deploy OS =============================== + +Overview +-------- + +This section describes how to use xCAT to deploy diskful nodes with RAID1 setup, and the procedure for RAID1 maintainence activities such as replacing a failed disk. + +All the examples in this section are based on three configuration scenarios: + +- RHEL6 on a system p machine with two SCSI disks sda and sdb + +- RHEL6 on a system p machine with two SAS disks and multipath configuration. + +- SLES 11 SP1 on a system p machine with two SCSI disks sda and sdb + +If you are not using the configuration scenarios listed above, you may need to modify some of the steps in this documentation to make it work in your environment. 
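
If you are not sure which scenario a node falls into, a quick look at its disk layout from a running system can help. This is only a hint and assumes the standard util-linux and device-mapper-multipath tools are available on the node:
::
    # Plain SCSI disks show up as sda/sdb (scenarios 1 and 3)
    lsblk -d -o NAME,SIZE,TYPE

    # If device-mapper multipath is active (scenario 2), the mpathX devices and their paths are listed
    multipath -ll

    # Any software RAID arrays that already exist on the node
    cat /proc/mdstat
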
+ +Deploy Diskful Nodes with RAID1 Setup on RedHat +----------------------------------------------- + +xCAT provides two sample kickstart template files with the RAID1 settings, /opt/xcat/share/xcat/install/rh/service.raid1.rhel6.ppc64.tmpl is for the configuration scenario #1 listed above and /opt/xcat/share/xcat/install/rh/service.raid1.multipath.rhel6.ppc64.tmpl is for the configuration scenario #2 listed above. You can customize the template file and put it under /install/custom/install// if the default one does not match your requirements. + +Here is the RAID1 partitioning section in service.raid1.rhel6.ppc64.tmpl: +:: + #Full RAID 1 Sample + part None --fstype "PPC PReP Boot" --size 8 --ondisk sda --asprimary + part None --fstype "PPC PReP Boot" --size 8 --ondisk sdb --asprimary + + part raid.01 --size 200 --fstype ext4 --ondisk sda + part raid.02 --size 200 --fstype ext4 --ondisk sdb + raid /boot --level 1 --device md0 raid.01 raid.02 + + part raid.11 --size 1024 --ondisk sda + part raid.12 --size 1024 --ondisk sdb + raid swap --level 1 --device md1 raid.11 raid.12 + + part raid.21 --size 1 --fstype ext4 --grow --ondisk sda + part raid.22 --size 1 --fstype ext4 --grow --ondisk sdb + raid / --level 1 --device md2 raid.21 raid.22 + +And here is the RAID1 partitioning section in service.raid1.multipath.rhel6.ppc64.tmpl +:: + #Full RAID 1 Sample + part None --fstype "PPC PReP Boot" --size 8 --ondisk mpatha --asprimary + part None --fstype "PPC PReP Boot" --size 8 --ondisk mpathb --asprimary + + part raid.01 --size 200 --fstype ext4 --ondisk mpatha + part raid.02 --size 200 --fstype ext4 --ondisk mpathb + raid /boot --level 1 --device md0 raid.01 raid.02 + + part raid.11 --size 1024 --ondisk mpatha + part raid.12 --size 1024 --ondisk mpathb + raid swap --level 1 --device md1 raid.11 raid.12 + + part raid.21 --size 1 --fstype ext4 --grow --ondisk mpatha + part raid.22 --size 1 --fstype ext4 --grow --ondisk mpathb + raid / --level 1 --device md2 raid.21 raid.22 + +The samples above created one PReP partition, one 200MB /boot partition and one / partition on sda/sda and mpatha/mpathb. If you want to use different partitioning scheme in your cluster, modify this RAID1 section in the kickstart template file accordingly. + +After the diskful nodes are up and running, you can check the RAID1 settings with the following commands: + +Mount command shows the /dev/mdx devices are mounted to various file systems, the /dev/mdx indicates that the RAID is being used on this node. 
+:: + [root@server ~]# mount + /dev/md2 on / type ext4 (rw) + proc on /proc type proc (rw) + sysfs on /sys type sysfs (rw) + devpts on /dev/pts type devpts (rw,gid=5,mode=620) + tmpfs on /dev/shm type tmpfs (rw) + /dev/md0 on /boot type ext4 (rw) + none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) + +The file /proc/mdstat includes the RAID devices status on the system, here is an example of /proc/mdstat in the non-multipath environment: +:: + [root@server ~]# cat /proc/mdstat + Personalities : [raid1] + md2 : active raid1 sda5[0] sdb5[1] + 19706812 blocks super 1.1 [2/2] [UU] + bitmap: 1/1 pages [64KB], 65536KB chunk + + md1 : active raid1 sda2[0] sdb2[1] + 1048568 blocks super 1.1 [2/2] [UU] + + md0 : active raid1 sda3[0] sdb3[1] + 204788 blocks super 1.0 [2/2] [UU] + + unused devices: + +On the system with multipath configuration, the /proc/mdstat looks like: +:: + [root@server ~]# cat /proc/mdstat + Personalities : [raid1] + md2 : active raid1 dm-11[0] dm-6[1] + 291703676 blocks super 1.1 [2/2] [UU] + bitmap: 1/1 pages [64KB], 65536KB chunk + + md1 : active raid1 dm-8[0] dm-3[1] + 1048568 blocks super 1.1 [2/2] [UU] + + md0 : active raid1 dm-9[0] dm-4[1] + 204788 blocks super 1.0 [2/2] [UU] + + unused devices: + +The command mdadm can query the detailed configuration for the RAID partitions: +:: + mdadm --detail /dev/md2 + +Deploy Diskful Nodes with RAID1 Setup on SLES +--------------------------------------------- + +xCAT provides one sample autoyast template files with the RAID1 settings /opt/xcat/share/xcat/install/sles/service.raid1.sles11.tmpl. You can customize the template file and put it under /install/custom/install// if the default one does not match your requirements. + +Here is the RAID1 partitioning section in service.raid1.sles11.tmpl: +:: + + + /dev/sda + + + false + 65 + 1 + primary + 24M + + + false + 253 + 2 + /dev/md0 + raid + 2G + + + false + 253 + 3 + /dev/md1 + raid + max + + + all + + + /dev/sdb + + + false + 131 + 1 + primary + 24M + + + false + 253 + 2 + /dev/md0 + raid + 2G + + + false + 253 + 3 + /dev/md1 + raid + max + + + all + + + /dev/md + + + reiser + true + swap + 131 + 0 + + 4 + left-asymmetric + raid1 + + + + reiser + true + / + 131 + 1 + + 4 + left-asymmetric + raid1 + + + + all + + + +The samples above created one 24MB PReP partition on each disk, one 2GB mirroed swap partition and one mirroed / partition uses all the disk space. If you want to use different partitioning scheme in your cluster, modify this RAID1 section in the autoyast template file accordingly. + +Since the PReP partition can not be mirroed between the two disks, some additional postinstall commands should be run to make the second disk bootable, here the the commands needed to make the second disk bootable: +:: + # Set the second disk to be bootable for RAID1 setup + parted -s /dev/sdb mkfs 1 fat16 + parted /dev/sdb set 1 type 6 + parted /dev/sdb set 1 boot on + dd if=/dev/sda1 of=/dev/sdb1 + bootlist -m normal sda sdb + +The procedure listed above has been added to the file /opt/xcat/share/xcat/install/scripts/post.sles11.raid1 to make it be automated. The autoyast template file service.raid1.sles11.tmpl will include the content of post.sles11.raid1, so no manual steps are needed here. + +After the diskful nodes are up and running, you can check the RAID1 settings with the following commands: + +Mount command shows the /dev/mdx devices are mounted to various file systems, the /dev/mdx indicates that the RAID is being used on this node. 
+:: + server:~ # mount + /dev/md1 on / type reiserfs (rw) + proc on /proc type proc (rw) + sysfs on /sys type sysfs (rw) + debugfs on /sys/kernel/debug type debugfs (rw) + devtmpfs on /dev type devtmpfs (rw,mode=0755) + tmpfs on /dev/shm type tmpfs (rw,mode=1777) + devpts on /dev/pts type devpts (rw,mode=0620,gid=5) + +The file /proc/mdstat includes the RAID devices status on the system, here is an example of /proc/mdstat: +:: + server:~ # cat /proc/mdstat + Personalities : [raid1] [raid0] [raid10] [raid6] [raid5] [raid4] + md0 : active (auto-read-only) raid1 sda2[0] sdb2[1] + 2104500 blocks super 1.0 [2/2] [UU] + bitmap: 0/1 pages [0KB], 128KB chunk + + md1 : active raid1 sda3[0] sdb3[1] + 18828108 blocks super 1.0 [2/2] [UU] + bitmap: 0/9 pages [0KB], 64KB chunk + + unused devices: + +The command mdadm can query the detailed configuration for the RAID partitions: +:: + mdadm --detail /dev/md1 + +Disk Replacement Procedure +-------------------------- + +If any one disk fails in the RAID1 arrary, do not panic. Follow the procedure listed below to replace the failed disk and you will be fine. + +Faulty disks should appear marked with an (F) if you look at /proc/mdstat: +:: + [root@server ~]# cat /proc/mdstat + Personalities : [raid1] + md2 : active raid1 dm-11[0](F) dm-6[1] + 291703676 blocks super 1.1 [2/1] [_U] + bitmap: 1/1 pages [64KB], 65536KB chunk + + md1 : active raid1 dm-8[0](F) dm-3[1] + 1048568 blocks super 1.1 [2/1] [_U] + + md0 : active raid1 dm-9[0](F) dm-4[1] + 204788 blocks super 1.0 [2/1] [_U] + + unused devices: + +We can see that the first disk is broken because all the RAID partitions on this disk are marked as (F). + +Remove the failed disk from RAID arrary +--------------------------------------- + +mdadm is the command that can be used to query and manage the RAID arrays on Linux. To remove the failed disk from RAID array, use the command: +:: + mdadm --manage /dev/mdx --remove /dev/xxx + +Where the /dev/mdx are the RAID partitions listed in /proc/mdstat file, such as md0, md1 and md2; the /dev/xxx are the backend devices like dm-11, dm-8 and dm-9 in the multipath configuration and sda5, sda3 and sda2 in the non-multipath configuration. + +Here is the example of removing failed disk from the RAID1 array in the non-multipath configuration: +:: + mdadm --manage /dev/md0 --remove /dev/sda3 + mdadm --manage /dev/md1 --remove /dev/sda2 + mdadm --manage /dev/md2 --remove /dev/sda5 + +Here is the example of removing failed disk from the RAID1 array in the multipath configuration: +:: + mdadm --manage /dev/md0 --remove /dev/dm-9 + mdadm --manage /dev/md1 --remove /dev/dm-8 + mdadm --manage /dev/md2 --remove /dev/dm-11 + +After the failed disk is removed from the RAID1 array, the partitions on the failed disk will be removed from /proc/mdstat and the "mdadm --detail" output also. 
+:: + [root@server ~]# cat /proc/mdstat + Personalities : [raid1] + md2 : active raid1 dm-6[1] + 291703676 blocks super 1.1 [2/1] [_U] + bitmap: 1/1 pages [64KB], 65536KB chunk + + md1 : active raid1 dm-3[1] + 1048568 blocks super 1.1 [2/1] [_U] + + md0 : active raid1 dm-4[1] + 204788 blocks super 1.0 [2/1] [_U] + + unused devices: + + [root@server ~]# mdadm --detail /dev/md0 + /dev/md0: + Version : 1.0 + Creation Time : Tue Jul 19 02:39:03 2011 + Raid Level : raid1 + Array Size : 204788 (200.02 MiB 209.70 MB) + Used Dev Size : 204788 (200.02 MiB 209.70 MB) + Raid Devices : 2 + Total Devices : 1 + Persistence : Superblock is persistent + + Update Time : Wed Jul 20 02:00:04 2011 + State : clean, degraded + Active Devices : 1 + Working Devices : 1 + Failed Devices : 0 + Spare Devices : 0 + + Name : c250f17c01ap01:0 (local to host c250f17c01ap01) + UUID : eba4d8ad:8f08f231:3c60e20f:1f929144 + Events : 26 + + Number Major Minor RaidDevice State + 0 0 0 0 removed + 1 253 4 1 active sync /dev/dm-4 + + +Replace the disk +---------------- + +Depends on the hot swap capability, you may simply unplug the disk and replace with a new one if the hot swap is supported; otherwise, you will need to power off the machine and replace the disk and the power on the machine. +Create partitions on the new disk + +The first thing we must do now is to create the exact same partitioning as on the new disk. We can do this with one simple command: +:: + sfdisk -d /dev/ | sfdisk /dev/ + +For the non-mulipath configuration, here is an example: +:: + sfdisk -d /dev/sdb | sfdisk /dev/sda + +For the multipath configuration, here is an example: +:: + sfdisk -d /dev/dm-1 | sfdisk /dev/dm-0 + +If you got error message "sfdisk: I don't like these partitions - nothing changed.", you can add "--force" option to the sfdisk command: +:: + sfdisk -d /dev/sdb | sfdisk /dev/sda --force + +You can run +:: + fdisk -l + +to check if both hard drives have the same partitioning now. + +Add the new disk into the RAID1 array +------------------------------------- + +After the partitions are created on the new disk, you can use command +:: + mdadm --manage /dev/mdx --add /dev/xxx + +to add the new disk to the RAID1 array. Where the /dev/mdx are the RAID partitions like md0, md1 and md2; the /dev/xxx are the backend devices like dm-11, dm-8 and dm-9 in the multipath configuration and sda5, sda3 and sda2 in the non-multipath configuration. + +Here is an example for the non-multipath configuration: +:: + mdadm --manage /dev/md0 --add /dev/sda3 + mdadm --manage /dev/md1 --add /dev/sda2 + mdadm --manage /dev/md2 --add /dev/sda5 + +Here is an example for the multipath configuration: +:: + mdadm --manage /dev/md0 --add /dev/dm-9 + mdadm --manage /dev/md1 --add /dev/dm-8 + mdadm --manage /dev/md2 --add /dev/dm-11 + +All done! You can have a cup of coffee to watch the fully automatic reconstruction running... + +While the RAID1 array is reconstructing, you will see some progress information in /proc/mdstat: +:: + [root@server raid1]# cat /proc/mdstat + Personalities : [raid1] + md2 : active raid1 dm-11[0] dm-6[1] + 291703676 blocks super 1.1 [2/1] [_U] + [>....................] recovery = 0.7% (2103744/291703676) finish=86.2min speed=55960K/sec + bitmap: 1/1 pages [64KB], 65536KB chunk + + md1 : active raid1 dm-8[0] dm-3[1] + 1048568 blocks super 1.1 [2/1] [_U] + [=============>.......] 
recovery = 65.1% (683904/1048568) finish=0.1min speed=48850K/sec + + md0 : active raid1 dm-9[0] dm-4[1] + 204788 blocks super 1.0 [2/1] [_U] + [===================>.] recovery = 96.5% (198016/204788) finish=0.0min speed=14144K/sec + + unused devices: + +After the reconstruction is done, the /proc/mdstat becomes like: +:: + [root@server ~]# cat /proc/mdstat + Personalities : [raid1] + md2 : active raid1 dm-11[0] dm-6[1] + 291703676 blocks super 1.1 [2/2] [UU] + bitmap: 1/1 pages [64KB], 65536KB chunk + + md1 : active raid1 dm-8[0] dm-3[1] + 1048568 blocks super 1.1 [2/2] [UU] + + md0 : active raid1 dm-9[0] dm-4[1] + 204788 blocks super 1.0 [2/2] [UU] + + unused devices: + +Make the new disk bootable +-------------------------- + +If the new disk does not have a PReP partition or the PReP partition has some problem, it will not be bootable, here is an example on how to make the new disk bootable, you may need to substitute the device name with your own values. + +**RedHat:** +:: + mkofboot .b /dev/sda + bootlist -m normal sda sdb + +**SLES:** +:: + parted -s /dev/sda mkfs 1 fat16 + parted /dev/sda set 1 type 6 + parted /dev/sda set 1 boot on + dd if=/dev/sdb1 of=/dev/sda1 + bootlist -m normal sda sdb + + From ac91e8bcfe1957c89ad6a108ad443d936cbc648b Mon Sep 17 00:00:00 2001 From: huweihua Date: Tue, 1 Sep 2015 22:05:04 -0400 Subject: [PATCH 03/19] add content --- .../diskful/customize_image/cfg_partition.rst | 75 +++++++++++++++++++ .../customize_image/driver_update_disk.rst | 51 ++++++++++++- 2 files changed, 125 insertions(+), 1 deletion(-) diff --git a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/cfg_partition.rst b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/cfg_partition.rst index ae27d5333..8343f32c0 100644 --- a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/cfg_partition.rst +++ b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/cfg_partition.rst @@ -1,2 +1,77 @@ +Configure Disk Partition +======================== .. include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_Overview + :end-before: END_Overview +Partition Definition File +------------------------- +.. include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_partition_definition_file_Overview + :end-before: END_partition_definition_file_Overview + +Create Partition File +^^^^^^^^^^^^^^^^^^^^^ +.. include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_partition_definition_file_content + :end-before: END_partition_definition_file_content + +.. include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_partition_definition_file_example_Ubuntu_Standard_partition_for_PPC64le + :end-before: END_partition_definition_file_example_Ubuntu_Standard_partition_for_PPC64le + +Associate Partition File with Osimage +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_partition_definition_file_Associate_partition_file_with_osimage_common + :end-before: END_partition_definition_file_Associate_partition_file_with_osimage_common + +Partitioning disk file(For Ubuntu only) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. 
include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_Partition_Disk_File_ubuntu_only + :end-before: END_Partition_Disk_File_ubuntu_only + +Additional preseed configuration file(For Ubuntu only) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_Additional_preseed_configuration_file_ubuntu_only + :end-before: END_Additional_preseed_configuration_file_ubuntu_only + +Partition Definition Script +--------------------------- +.. include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_Partition_Definition_Script_overview + :end-before: END_Partition_Definition_Script_overview + +Create Partition Script +^^^^^^^^^^^^^^^^^^^^^^^ +.. include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_Partition_Definition_Script_Create_partition_script_content + :end-before: END_Partition_Definition_Script_Create_partition_script_content + +.. include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_Partition_Definition_Script_Create_partition_script_example_redhat_sles + :end-before: END_Partition_Definition_Script_Create_partition_script_example_redhat_sles + +.. include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_Partition_Definition_Script_Create_partition_script_example_ubuntu + :end-before: END_Partition_Definition_Script_Create_partition_script_example_ubuntu + +Associate partition script with osimage +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_Partition_Definition_Script_Associate_partition_script_with_osimage_common + :end-before: END_Partition_Definition_Script_Associate_partition_script_with_osimage_common + +Partitioning disk script(For Ubuntu only) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_Partition_Disk_Script_ubuntu_only + :end-before: END_Partition_Disk_Script_ubuntu_only + +Additional preseed configuration script(For Ubuntu only) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. include:: ../../../common/deployment/cfg_partition.rst + :start-after: BEGIN_Additional_preseed_configuration_script_ubuntu_only + :end-before: END_Additional_preseed_configuration_script_ubuntu_only \ No newline at end of file diff --git a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/driver_update_disk.rst b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/driver_update_disk.rst index fbbe6539f..35c6e9fe2 100644 --- a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/driver_update_disk.rst +++ b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/driver_update_disk.rst @@ -1,2 +1,51 @@ -.. include:: ../../../common/deployment/driver_update_disk.rst +Load Additional Drivers +======================== +Overview +-------- +.. include:: ../../../common/deployment/driver_update_disk.rst + :start-after: BEGIN_Overview + :end-before: END_Overview + +Locate the New Drivers +---------------------- + +For Driver Update Disk +^^^^^^^^^^^^^^^^^^^^^^ + +.. include:: ../../../common/deployment/driver_update_disk.rst + :start-after: BEGIN_locate_driver_for_DUD + :end-before: END_locate_driver_for_DUD + +For Driver RPM Packages +^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
include:: ../../../common/deployment/driver_update_disk.rst + :start-after: BEGIN_locate_driver_for_RPM + :end-before: END_locate_driver_for_RPM + +Inject the Drivers into the initrd +---------------------------------- + +For Driver Update Disk +^^^^^^^^^^^^^^^^^^^^^^ + +.. include:: ../../../common/deployment/driver_update_disk.rst + :start-after: BEGIN_inject_into_initrd__for_diskfull_for_DUD + :end-before: END_inject_into_initrd__for_diskfull_for_DUD + +For Driver RPM Packages +^^^^^^^^^^^^^^^^^^^^^^^ + +.. include:: ../../../common/deployment/driver_update_disk.rst + :start-after: BEGIN__inject_into_initrd__for_diskfull_for_RPM + :end-before: END_inject_into_initrd__for_diskfull_for_RPM + +Notes +----- + +.. include:: ../../../common/deployment/driver_update_disk.rst + :start-after: BEGIN_node + :end-before: END_node + + From 4b4315863f9df4590264ed93705d5f9f00e0d1d1 Mon Sep 17 00:00:00 2001 From: huweihua Date: Tue, 1 Sep 2015 22:05:55 -0400 Subject: [PATCH 04/19] add content --- .../customize_image/driver_update_disk.rst | 51 ++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskless/customize_image/driver_update_disk.rst b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskless/customize_image/driver_update_disk.rst index fbbe6539f..529d0f6b4 100644 --- a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskless/customize_image/driver_update_disk.rst +++ b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskless/customize_image/driver_update_disk.rst @@ -1,2 +1,51 @@ -.. include:: ../../../common/deployment/driver_update_disk.rst +Load Additional Drivers +======================== +Overview +-------- +.. include:: ../../../common/deployment/driver_update_disk.rst + :start-after: BEGIN_Overview + :end-before: END_Overview + +Locate the New Drivers +---------------------- + +For Driver Update Disk +^^^^^^^^^^^^^^^^^^^^^^ + +.. include:: ../../../common/deployment/driver_update_disk.rst + :start-after: BEGIN_locate_driver_for_DUD + :end-before: END_locate_driver_for_DUD + +For Driver RPM Packages +^^^^^^^^^^^^^^^^^^^^^^^ + +.. include:: ../../../common/deployment/driver_update_disk.rst + :start-after: BEGIN_locate_driver_for_RPM + :end-before: END_locate_driver_for_RPM + +Inject the Drivers into the initrd +---------------------------------- + +For Driver Update Disk +^^^^^^^^^^^^^^^^^^^^^^ + +.. include:: ../../../common/deployment/driver_update_disk.rst + :start-after: BEGIN_inject_into_initrd__for_diskless_for_DUD + :end-before: END_inject_into_initrd__for_diskless_for_DUD + +For Driver RPM Packages +^^^^^^^^^^^^^^^^^^^^^^^ + +.. include:: ../../../common/deployment/driver_update_disk.rst + :start-after: BEGIN_inject_into_initrd__for_diskless_for_RPM + :end-before: END_inject_into_initrd__for_diskless_for_RPM + +Notes +----- + +.. 
include:: ../../../common/deployment/driver_update_disk.rst
+   :start-after: BEGIN_node
+   :end-before: END_node
+
+
From 7cb394ae77fb5756c0b25d10ee2e392e389ad68d Mon Sep 17 00:00:00 2001
From: huweihua
Date: Wed, 2 Sep 2015 02:42:17 -0400
Subject: [PATCH 05/19] add label for invoking

---
 .../manage_clusters/common/deployment/create_img.rst | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst
index ddb06de57..e28499eb9 100644
--- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst
+++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst
@@ -1,3 +1,5 @@
+.. _create_img:
+
 Select or Create an osimage Definition
 ======================================
 

From aa9863162be403992574ce33ba9164912c0c1b9e Mon Sep 17 00:00:00 2001
From: huweihua
Date: Wed, 2 Sep 2015 04:06:46 -0400
Subject: [PATCH 06/19] add label for invoking

---
 .../manage_clusters/common/deployment/deploy_os.rst | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst
index fdf9b83d8..1106bafd1 100644
--- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst
+++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst
@@ -1,3 +1,5 @@
+.. _deploy_os:
+
 Initialize the Compute for Deployment
 =====================================
 

From 93b5fb8eab2c0878c5628a1c4499b655b180f3be Mon Sep 17 00:00:00 2001
From: huweihua
Date: Sun, 6 Sep 2015 03:35:56 -0400
Subject: [PATCH 07/19] add content for sysclone

---
 docs/source/advanced/sysclone.rst | 220 ++++++++++++++++++++++++++++++
 1 file changed, 220 insertions(+)

diff --git a/docs/source/advanced/sysclone.rst b/docs/source/advanced/sysclone.rst
index 6b26ec335..2ff518694 100644
--- a/docs/source/advanced/sysclone.rst
+++ b/docs/source/advanced/sysclone.rst
@@ -1,2 +1,222 @@
 Using System Clone to Deploy Diskful Node
 =========================================
+
+When you need to deploy a large number of nodes that share the same configuration, the simplest way is to clone. You customize and tweak one node's configuration to your needs, verify its proper operation, and then use that node as the template. You can capture an osimage from this template node and quickly deploy the rest of the nodes with that image. xCAT (2.8.2 and above) provides this feature, called Sysclone, to handle this scenario.
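+At a high level, the procedure has three stages: deploy and customize a golden client, capture an osimage from it, and clone that image to the remaining nodes. A condensed sketch of the commands involved is shown below; the node name ``golden``, the node range ``cn01-cn20``, the distro osimage name and the captured image name ``mycomputeimage`` are illustrative placeholders only, and each stage is explained in detail in the sections that follow.
+::
+
+    # 1. Deploy and customize the golden client as a regular diskful node
+    nodeset golden osimage=rhels6.4-x86_64-install-compute
+    rsetboot golden net
+    rpower golden boot
+
+    # 2. Capture an osimage from the golden client (run on the management node)
+    imgcapture golden -t sysclone -o mycomputeimage
+
+    # 3. Clone the captured image to the target nodes
+    nodeset cn01-cn20 osimage=mycomputeimage
+    rsetboot cn01-cn20 net
+    rpower cn01-cn20 boot
+
+On Power servers, the ``rnetboot`` command replaces the ``rsetboot``/``rpower`` pair, as shown later in this document.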
+ +List of Supported Arch and OS +----------------------------- + ++------------------+-------------+----------------+-------------+------------------------------------------------------+ +| xCAT version | OS | Tested Version | ARCH | Feature | ++==================+=============+================+=============+======================================================+ +| 2.8.2 and later | Centos | 6.3 5.9 | x86_64 | Basic clone node | ++ +-------------+----------------+-------------+------------------------------------------------------+ +| | redhat | 6.4 5.9 | x86_64 | Basic clone node | ++------------------+-------------+----------------+-------------+------------------------------------------------------+ +| 2.8.3 and later | sles | 11.3 10.4 | x86_64 | Basic clone node | ++------------------+-------------+----------------+-------------+------------------------------------------------------+ +| 2.8.5 and later | Centos | 6.3 | x86_64 | Add feature: update delta changes(has limitation) | ++ +-------------+----------------+-------------+------------------------------------------------------+ +| | redhat | 6.4 | x86_64 | Add feature: update delta changes(has limitation) | ++ +-------------+----------------+-------------+------------------------------------------------------+ +| | sles | 11.3 | x86_64 | Add feature: update delta changes | ++ +-------------+----------------+-------------+------------------------------------------------------+ +| | sles | 10.x | x86_64 | Not support any more | ++------------------+-------------+----------------+-------------+------------------------------------------------------+ +| 2.9 and later | redhat | 6.4 | ppc64 | Basic clone node/update delta changes/LVM | ++ +-------------+----------------+-------------+------------------------------------------------------+ +| | sles | 11.3 | ppc64 | Basic clone node/update delta changes | ++ +-------------+----------------+-------------+------------------------------------------------------+ +| | redhat | 7.0 | ppc64 | Basic clone node/update delta changes/LVM | ++ +-------------+----------------+-------------+------------------------------------------------------+ +| | redhat | 6.4 7.0 | x86_64 | support LVM | ++------------------+-------------+----------------+-------------+------------------------------------------------------+ + +Using Sysclone to Install Nodes +------------------------------- + +This document describes how to install and configure a template node (called golden client), capture an image from this template node. Then using this image to deploy other same nodes (called target nodes) quickly. + +Prepare the xCAT Management Node for Support Sysclone +````````````````````````````````````````````````````` + +How to configure xCAT management node please refer to section [TODO] :ref:`Install Guides ` + +For support Sysclone, we need to install some extra rpms on management node and the golden client. + +1. Download the xcat-dep tarball (xcat-dep-XXX.tar.bz2) which includes extra rpms needed by Sysclone. (You might already have the xcat-dep tarball on management node. If not, go to `xcat-dep `_ and get the latest xCAT dependency tarball.) + +2. 
Install systemimager server on management node + +* **[RH/CentOS]**:: + + yum -y install systemimager-server + +* **[SLES]**:: + + zypper -n install systemimager-server + +*[Note] Maybe you will encounter below failed message when you install systemimager-server, just ignore it.*:: + + Can't locate AppConfig.pm in @INC (@INC contains: /usr/lib/systemimager/perl /usr/local/lib64/perl5 /usr/local/share/perl5 /usr/lib64/perl5/vendor_perl /usr/share/perl5/vendor_perl /usr/lib64/perl5 /usr/share/perl5 .) at /usr/lib/systemimager/perl/SystemImager/Config.pm line 13. + BEGIN failed--compilation aborted at /usr/lib/systemimager/perl/SystemImager/Config.pm line 13. + Compilation failed in require at /usr/lib/systemimager/perl/SystemImager/Server.pm line 17. + BEGIN failed--compilation aborted at /usr/lib/systemimager/perl/SystemImager/Server.pm line 17. + Compilation failed in require at /usr/sbin/si_mkrsyncd_conf line 28. + BEGIN failed--compilation aborted at /usr/sbin/si_mkrsyncd_conf line 28. + +3. Do some preparation for install and configure golden client in management node. Copy the xcat-dep-XXX.tar.bz2 file to directory "/install/post/otherpkgs///xcat/" of the management node according your golden client's OS version and system architecture, then decompression it. For example: + +* **[Centos6.3 and x86_64]**:: + + mkdir -p /install/post/otherpkgs/centos6.3/x86_64/xcat + cp xcat-dep-*.tar.bz2 /install/post/otherpkgs/centos6.3/x86_64/xcat + cd /install/post/otherpkgs/centos6.3/x86_64/xcat + tar jxvf xcat-dep-*.tar.bz2 + +* **[SLES11.3 and x86_64]**:: + + mkdir -p /install/post/otherpkgs/sles11.3/x86_64/xcat + cp xcat-dep-*.tar.bz2 /install/post/otherpkgs/sles11.3/x86_64/xcat + cd /install/post/otherpkgs/sles11.3/x86_64/xcat + tar jxvf xcat-dep-*.tar.bz2 + +* **[Redhat6.4 and ppc64 system]**:: + + mkdir -p /install/post/otherpkgs/rhels6.4/ppc64/xcat + cp xcat-dep-*.tar.bz2 /install/post/otherpkgs/rhels6.4/ppc64/xcat + cd /install/post/otherpkgs/rhels6.4/ppc64/xcat + tar jxvf xcat-dep-*.tar.bz2 + +Install and Configure the Golden Client +``````````````````````````````````````` + +The Golden Client acts as a regular node for xCAT, just have some extra rpms to support clone. When you deploy golden client with xCAT, you just need to add a few additional definitions to the image which will be used to deploy golden client. + +For information of how to install a regular node, please refer to section :ref:`Diskful Installation ` + +For support clone, add 'otherpkglist' and 'otherpkgdir' attributes to the image definition which will be used to deploy golden client, then deploy golden client as normal. then the golden client will have extra rpms to support clone. If you have deployed your golden client already, using 'updatenode' command to push these extra rpms to golden client. Centos share the same pkglist file with redhat. 
For example: + +* **[RH6.4 and x86_64 system]**:: + + chdef -t osimage -o otherpkglist=/opt/xcat/share/xcat/instal /rh/sysclone.rhels6.x86_64.otherpkgs.pkglist + chdef -t osimage -o -p otherpkgdir=/install/post/otherpkgs/rhels6.4/x86_64 + updatenode -S + +* **[Centos6.3 and x86_64]**:: + + chdef -t osimage -o otherpkglist=/opt/xcat/share/xcat/install /rh/sysclone.rhels6.x86_64.otherpkgs.pkglist + chdef -t osimage -o -p otherpkgdir=/install/post/otherpkgs/centos6.3/x86_64 + updatenode -S + +* **[SLES11.3 and x86_64 system]**:: + + chdef -t osimage -o otherpkglist=/opt/xcat/share/xcat/install /sles/sysclone.sles11.x86_64.otherpkgs.pkglist + chdef -t osimage -o -p otherpkgdir=/install/post/otherpkgs/sles11.3/x86_64 + updatenode -S + +* **[For RH6.3 and ppc64 system]**:: + + chdef -t osimage -o otherpkglist=/opt/xcat/share/xcat/install /rh/sysclone.rhels6.ppc64.otherpkgs.pkglist + chdef -t osimage -o -p otherpkgdir=/install/post/otherpkgs/rhels6.3/ppc64 + updatenode -S + +*[Note]: If you install systemimager RPMs on Centos 6.5 node by above steps, you maybe hit failure. this is a known issue because some defect of Centos6.5 itself. Please refer to known issue section for help.* + +Capture Image from Golden Client +```````````````````````````````` + +On Management node, use xCAT command 'imgcapture' to capture an image from the golden-client. +:: + imgcapture -t sysclone -o + +When imgcapture is running, it pulls the image from the golden-client, and creates a image files system and a corresponding osimage definition on the xcat management node. You can use below command to check the osimage attributes. +:: + lsdef -t osimage + +Install the target nodes with the image from the golden-client +`````````````````````````````````````````````````````````````` + +following below commands to install the target nodes with the image captured from golden client. + +* **[x86_64 system]**:: + + nodeset osimage= + rsetboot net + rpower boot + +* **[ppc64]**:: + + nodeset osimage= + rnetboot + + +Update Nodes Later On +--------------------- + +If, at a later time, you need to make changes to the golden client (install new rpms, change config files, etc.), you can capture the changes and push them to the already cloned nodes without need to restart cloned nodes. This process will only transfer the deltas, so it will be much faster than the original cloning. + +**[Limitation]**: In xcat2.8.5, this feature has limitation in redhat and centos. when your delta changes related bootloader, it would encounter error. This issue will be fixed in xcat higher version. So up to now, in redhat and centos, this feature just update files not related bootloader. + +Update delta changes please follow below steps: + +1. Make changes to your golden node (install new rpms, change config files, etc.). + +2. From the mgmt node, capture the image using the same command as before. Assuming is an existing image, this will only sync the changes to the image on the Management node:: + + imgcapture -t sysclone -o + +3. 
To synchronize the changes to your target nodes do the following: + +a) If you are running xCAT 2.8.4 or older: + +From one of the nodes you want to update, test the update to see which files will be updated:: + + xdsh -s 'si_updateclient --server --dry-run --yes' + + +If it lists files and directories that you do not think should be updated, you need to add them to the exclude list in 3 places + +* On the golden node: /etc/systemimager/updateclient.local.exclude +* On the mgmt node: /install/sysclone/images//etc/systemimager/updateclient.local.exclude +* On all of the nodes to be updated: /etc/systemimager/updateclient.local.exclude + +From the mgmt node, push the updates out to the other nodes:: + + xdsh -s 'si_updateclient --server --yes' + +b) If you are running xCAT 2.8.5 or later: + +you could push the updates out to the other nodes quickly by below command:: + + updatenode -S + + +Known Issue +----------- + +Can not install systemimager RPMs in centos6.5 by yum +`````````````````````````````````````````````````````` + +If you install systemimager RPMs on Centos 6.5 node by yum, you maybe hit failure because some defect of Centos6.5 itself. So please copy related RPMs to Centos 6.5 node and install them by hand. + +* **On management node**:: + + [root@MN]# cd //xcat-dep + [root@MN xcat-dep]# scp systemimager-client-4.3.0-0.1.noarch.rpm \ + systemconfigurator-2.2.11-1.noarch.rpm \ + systemimager-common-4.3.0-0.1.noarch.rpm \ + perl-AppConfig-1.52-4.noarch.rpm :/ + +* **On golden client**:: + + [root@Centos6.5 node]# cd / + [root@Centos6.5 node]# rpm -ivh perl-AppConfig-1.52-4.noarch.rpm + [root@Centos6.5 node]# rpm -ivh systemconfigurator-2.2.11-1.noarch.rpm + [root@Centos6.5 node]# rpm -ivh systemimager-common-4.3.0-0.1.noarch.rpm + [root@Centos6.5 node]# rpm -ivh systemimager-client-4.3.0-0.1.noarch.rpm + +Kernel panic at times when install target node with rhels7.0 in power 7 server +`````````````````````````````````````````````````````````````````````````````` + +When you clone rhels7.0 image to target node which is power 7 server lpar, maybe you will hit Kernel panic problem at times after boot loader grub2 download kernel and initrd. This is an known issue but without resolve yet. up to now, we recommend you try again. \ No newline at end of file From 665a5fdfdfd8f2bbcf04af6e2641a1f37fb4089c Mon Sep 17 00:00:00 2001 From: huweihua Date: Sun, 6 Sep 2015 03:39:11 -0400 Subject: [PATCH 08/19] add invoking label --- .../admin-guides/manage_clusters/ppc64le/diskful/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/index.rst b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/index.rst index 4b3907c02..3ecf2f10c 100644 --- a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/index.rst +++ b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/index.rst @@ -1,4 +1,4 @@ -.. _diskful_installation: +.. 
_diskfull_installation: Diskful Installation ==================== From 211aec7617fe10222f5f03dd88eb31d9f83a0bf8 Mon Sep 17 00:00:00 2001 From: huweihua Date: Sun, 6 Sep 2015 04:25:39 -0400 Subject: [PATCH 09/19] modify some statement according comments --- .../manage_clusters/common/deployment/create_img.rst | 5 ++--- .../manage_clusters/common/deployment/deploy_os.rst | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst index e28499eb9..d23677adf 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst @@ -5,7 +5,7 @@ Select or Create an osimage Definition Before creating image by xCAT, distro media should be prepared ahead. That can be ISOs or DVDs. -XCAT use 'copycds' command to create image which will be available to install nodes. 'copycds' command copies the contents of distro from media to /install// on management node. +XCAT use 'copycds' command to create image which will be available to install nodes. "copycds" will copy all contents of Distribution DVDs/ISOs or Service Pack DVDs/ISOs to a destination directory, and create several relevant osimage definitions by default. If using an ISO, copy it to (or NFS mount it on) the management node, and then run: :: @@ -15,7 +15,6 @@ If using a DVD, put it in the DVD drive of the management node and run: :: copycds /dev/ -The 'copycds' command automatically creates several osimage defintions in the database that can be used for node deployment. To see the list of osimages, run :: lsdef -t osimage @@ -45,5 +44,5 @@ If need to change osimage name to your favorite name, below statement maybe help :: lsdef -t osimage -z rhels6.2-x86_64-install-compute | sed 's/^[^ ]\+:/mycomputeimage:/' | mkdef -z - + diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst index 1106bafd1..3c8ed47fb 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst @@ -18,14 +18,14 @@ Choose such additional attribute of nodeset according to your requirement, if wa Start the OS Deployment ======================= -Start the deployment involves two key operations. One is setup node boot from network, another is reboot ndoe: +Start the deployment involves two key operations. First specify the boot device of the next boot to be network, then reboot the node: For Power machine, those two operations can be completed by one command '**rnetboot**', :: rnetboot But for x server, those two operations need two independent commands. 
-Set x server boot from network, run +Specify the boot device boot from network next time, run :: rsetboot net From 59e1cc0f7c722728d5aea1ee8dfc8a74e8fe1042 Mon Sep 17 00:00:00 2001 From: huweihua Date: Mon, 7 Sep 2015 02:56:20 -0400 Subject: [PATCH 10/19] modify format according Guidelines for xCAT Documentation --- .../diskful/customize_image/cfg_partition.rst | 16 ++++++++-------- .../customize_image/driver_update_disk.rst | 8 ++++---- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/cfg_partition.rst b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/cfg_partition.rst index 8343f32c0..5855a231e 100644 --- a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/cfg_partition.rst +++ b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/cfg_partition.rst @@ -11,7 +11,7 @@ Partition Definition File :end-before: END_partition_definition_file_Overview Create Partition File -^^^^^^^^^^^^^^^^^^^^^ +````````````````````` .. include:: ../../../common/deployment/cfg_partition.rst :start-after: BEGIN_partition_definition_file_content :end-before: END_partition_definition_file_content @@ -21,19 +21,19 @@ Create Partition File :end-before: END_partition_definition_file_example_Ubuntu_Standard_partition_for_PPC64le Associate Partition File with Osimage -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +````````````````````````````````````` .. include:: ../../../common/deployment/cfg_partition.rst :start-after: BEGIN_partition_definition_file_Associate_partition_file_with_osimage_common :end-before: END_partition_definition_file_Associate_partition_file_with_osimage_common Partitioning disk file(For Ubuntu only) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +``````````````````````````````````````` .. include:: ../../../common/deployment/cfg_partition.rst :start-after: BEGIN_Partition_Disk_File_ubuntu_only :end-before: END_Partition_Disk_File_ubuntu_only Additional preseed configuration file(For Ubuntu only) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +`````````````````````````````````````````````````````` .. include:: ../../../common/deployment/cfg_partition.rst :start-after: BEGIN_Additional_preseed_configuration_file_ubuntu_only :end-before: END_Additional_preseed_configuration_file_ubuntu_only @@ -45,7 +45,7 @@ Partition Definition Script :end-before: END_Partition_Definition_Script_overview Create Partition Script -^^^^^^^^^^^^^^^^^^^^^^^ +``````````````````````` .. include:: ../../../common/deployment/cfg_partition.rst :start-after: BEGIN_Partition_Definition_Script_Create_partition_script_content :end-before: END_Partition_Definition_Script_Create_partition_script_content @@ -59,19 +59,19 @@ Create Partition Script :end-before: END_Partition_Definition_Script_Create_partition_script_example_ubuntu Associate partition script with osimage -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +``````````````````````````````````````` .. include:: ../../../common/deployment/cfg_partition.rst :start-after: BEGIN_Partition_Definition_Script_Associate_partition_script_with_osimage_common :end-before: END_Partition_Definition_Script_Associate_partition_script_with_osimage_common Partitioning disk script(For Ubuntu only) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +````````````````````````````````````````` .. 
include:: ../../../common/deployment/cfg_partition.rst :start-after: BEGIN_Partition_Disk_Script_ubuntu_only :end-before: END_Partition_Disk_Script_ubuntu_only Additional preseed configuration script(For Ubuntu only) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +```````````````````````````````````````````````````````` .. include:: ../../../common/deployment/cfg_partition.rst :start-after: BEGIN_Additional_preseed_configuration_script_ubuntu_only :end-before: END_Additional_preseed_configuration_script_ubuntu_only \ No newline at end of file diff --git a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/driver_update_disk.rst b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/driver_update_disk.rst index 35c6e9fe2..e16af5838 100644 --- a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/driver_update_disk.rst +++ b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskful/customize_image/driver_update_disk.rst @@ -11,14 +11,14 @@ Locate the New Drivers ---------------------- For Driver Update Disk -^^^^^^^^^^^^^^^^^^^^^^ +`````````````````````` .. include:: ../../../common/deployment/driver_update_disk.rst :start-after: BEGIN_locate_driver_for_DUD :end-before: END_locate_driver_for_DUD For Driver RPM Packages -^^^^^^^^^^^^^^^^^^^^^^^ +``````````````````````` .. include:: ../../../common/deployment/driver_update_disk.rst :start-after: BEGIN_locate_driver_for_RPM @@ -28,14 +28,14 @@ Inject the Drivers into the initrd ---------------------------------- For Driver Update Disk -^^^^^^^^^^^^^^^^^^^^^^ +`````````````````````` .. include:: ../../../common/deployment/driver_update_disk.rst :start-after: BEGIN_inject_into_initrd__for_diskfull_for_DUD :end-before: END_inject_into_initrd__for_diskfull_for_DUD For Driver RPM Packages -^^^^^^^^^^^^^^^^^^^^^^^ +``````````````````````` .. include:: ../../../common/deployment/driver_update_disk.rst :start-after: BEGIN__inject_into_initrd__for_diskfull_for_RPM From bcb954f8e466bc4bd2e085ee54bc3b4ef2adfac4 Mon Sep 17 00:00:00 2001 From: huweihua Date: Mon, 7 Sep 2015 03:06:48 -0400 Subject: [PATCH 11/19] modify format according Guidelines for xCAT Documentation --- .../diskless/customize_image/driver_update_disk.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskless/customize_image/driver_update_disk.rst b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskless/customize_image/driver_update_disk.rst index 529d0f6b4..10b8a667b 100644 --- a/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskless/customize_image/driver_update_disk.rst +++ b/docs/source/guides/admin-guides/manage_clusters/ppc64le/diskless/customize_image/driver_update_disk.rst @@ -11,14 +11,14 @@ Locate the New Drivers ---------------------- For Driver Update Disk -^^^^^^^^^^^^^^^^^^^^^^ +`````````````````````` .. include:: ../../../common/deployment/driver_update_disk.rst :start-after: BEGIN_locate_driver_for_DUD :end-before: END_locate_driver_for_DUD For Driver RPM Packages -^^^^^^^^^^^^^^^^^^^^^^^ +``````````````````````` .. include:: ../../../common/deployment/driver_update_disk.rst :start-after: BEGIN_locate_driver_for_RPM @@ -28,14 +28,14 @@ Inject the Drivers into the initrd ---------------------------------- For Driver Update Disk -^^^^^^^^^^^^^^^^^^^^^^ +`````````````````````` .. 
include:: ../../../common/deployment/driver_update_disk.rst :start-after: BEGIN_inject_into_initrd__for_diskless_for_DUD :end-before: END_inject_into_initrd__for_diskless_for_DUD For Driver RPM Packages -^^^^^^^^^^^^^^^^^^^^^^^ +``````````````````````` .. include:: ../../../common/deployment/driver_update_disk.rst :start-after: BEGIN_inject_into_initrd__for_diskless_for_RPM From a5d273b37eb2a976c47e6e7d3609cebda3c5c10a Mon Sep 17 00:00:00 2001 From: huweihua Date: Tue, 8 Sep 2015 22:54:12 -0400 Subject: [PATCH 12/19] modify some part depending on the comments --- .../common/deployment/create_img.rst | 25 +++++++++++++++++-- .../common/deployment/deploy_os.rst | 5 ++-- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst index d23677adf..e0c58ca64 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/create_img.rst @@ -23,12 +23,26 @@ To see the attributes of a particular osimage, run :: lsdef -t osimage -Initially, some attributes of osimage is assigned to default value by xCAT, they all can work correctly, cause the files or templates invoked by those attributes are shipped with xCAT by default. If need to customize those attribute, refer to next section "Customize osimage". +Initially, some attributes of osimage is assigned to default value by xCAT, they all can work correctly, cause the files or templates invoked by those attributes are shipped with xCAT by default. If need to customize those attribute, refer to next section :doc:`Customize osimage ` +Below is an example of osimage definitions created by 'copycds' +:: + [root@server ~]# lsdef -t osimage + rhels7.2-ppc64le-install-compute (osimage) + rhels7.2-ppc64le-install-service (osimage) + rhels7.2-ppc64le-netboot-compute (osimage) + rhels7.2-ppc64le-stateful-mgmtnode (osimage) + +In these osimage definitions shown above + +* **--install-compute** is the default osimage definition used for diskfull installation +* **--netboot-compute** is the default osimage definition used for diskless installation +* **--install-service** is the default osimage definition used for service node deployment which shall be used in hierarchical environment **[Below tips maybe helpful for you]** **[Tips 1]** + If this is the same distro version as what your management node used, create a .repo file in /etc/yum.repos.d with content similar to: :: [local--] @@ -40,7 +54,14 @@ If this is the same distro version as what your management node used, create a . In this way, if you need install some additional RPMs into your MN later, you can simply install them by yum. Or if you are installing a software on your MN that depends some RPMs from the this disto, those RPMs will be found and installed automatically. **[Tips 2]** -If need to change osimage name to your favorite name, below statement maybe help: + +Sometime you can create/modify a osimage definition easily based on the default osimage definition. 
the general steps can be: + +* lsdef -t osimage -z --install-compute > .stanza +* modify .stanza depending on your requirement +* cat .stanza| mkdef -z + +For example, if need to change osimage name to your favorite name, below statement maybe helpful: :: lsdef -t osimage -z rhels6.2-x86_64-install-compute | sed 's/^[^ ]\+:/mycomputeimage:/' | mkdef -z diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst index 3c8ed47fb..f035f9626 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/deploy_os.rst @@ -6,6 +6,7 @@ Initialize the Compute for Deployment XCAT use '**nodeset**' command to associate a specific image to a node which will be installed with this image. :: nodeset osimage= + There are more attributes of nodeset used for some specific purpose or specific machines, for example: @@ -20,11 +21,11 @@ Start the OS Deployment Start the deployment involves two key operations. First specify the boot device of the next boot to be network, then reboot the node: -For Power machine, those two operations can be completed by one command '**rnetboot**', +For **Power machine**, those two operations can be completed by one command '**rnetboot**' :: rnetboot -But for x server, those two operations need two independent commands. +But for **x86_64 server**, those two operations need two independent commands. Specify the boot device boot from network next time, run :: rsetboot net From fdec28273670109ccde943e15872a649f576ac71 Mon Sep 17 00:00:00 2001 From: huweihua Date: Tue, 8 Sep 2015 23:00:42 -0400 Subject: [PATCH 13/19] modify some part depending on the comments --- .../common/deployment/cfg_second_adapter.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_second_adapter.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_second_adapter.rst index 848999675..283dcdf35 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_second_adapter.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_second_adapter.rst @@ -15,7 +15,7 @@ Define configuration information for the Secondary Adapters in the nics table There are 3 ways to complete this operation. -**First way is use command line input. below is a example** +**First way is to use command line input. below is a example** :: [root@ls21n01 ~]# mkdef cn1 groups=all nicips.eth1="11.1.89.7|12.1.89.7" nicnetworks.eth1="net11|net12" nictypes.eth1="Ethernet" 1 object definitions have been created or modified. @@ -23,7 +23,7 @@ There are 3 ways to complete this operation. [root@ls21n01 ~]# chdef cn1 nicips.eth2="13.1.89.7|14.1.89.7" nicnetworks.eth2="net13|net14" nictypes.eth2="Ethernet" 1 object definitions have been created or modified. -**Second way is using stanza file** +**Second way is to use stanza file** prepare your stanza file .stanza. the content of .stanza like below: :: @@ -45,7 +45,7 @@ define configuration information by .stanza :: cat .stanza | mkdef -z -**Third way is use 'tabedit' to edit the nics table directly** +**Third way is to use 'tabedit' to edit the nics table directly** The 'tabedit' command opens the specified table in the user's editor(such as VI), allows user to edit any text, and then writes changes back to the database table. 
But it's tedious and error prone, so don't recommended this way. if using this way, notices the **nicips**, **nictypes** and **nicnetworks** attributes are required. From 899031ab68b4079a3cca624210ec4fe5c17b99ae Mon Sep 17 00:00:00 2001 From: huweihua Date: Tue, 8 Sep 2015 23:03:53 -0400 Subject: [PATCH 14/19] modify some part depending on the comments --- .../common/deployment/cfg_partition.rst | 55 ++++++------------- 1 file changed, 17 insertions(+), 38 deletions(-) diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_partition.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_partition.rst index 84b2f5c10..f36ef1e05 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_partition.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_partition.rst @@ -5,7 +5,7 @@ By default, xCAT will install the operating system on the first disk and with de - 'Partition definition file' way can be used for RedHat, SLES and Ubuntu. - 'partition definition script' way was tested only for RedHat and Ubuntu, use this feature on SLES at your own risk. -- Cause disk configuration for Ubuntu is different from RedHat, so there are some specific section for ubuntu. +- Because disk configuration for ubuntu is different from Redhat, there maybe some section special for ubuntu. .. END_Overview .. BEGIN_partition_definition_file_Overview @@ -36,7 +36,7 @@ The partition file must follow the partitioning syntax of the installer(e.g. kic Here is partition definition file example for RedHat standard partition in IBM Power machines :: # Uncomment this PReP line for IBM Power servers - #part None --fstype "PPC PReP Boot" --size 8 --ondisk sda + part None --fstype "PPC PReP Boot" --size 8 --ondisk sda # Uncomment this efi line for x86_64 servers #part /boot/efi --size 50 --ondisk /dev/sda --fstype efi part /boot --size 256 --fstype ext4 @@ -49,7 +49,7 @@ Here is partition definition file example for RedHat standard partition in IBM P Here is partition definition file example for RedHat LVM partition in IBM Power machines :: # Uncomment this PReP line for IBM Power servers - #part None --fstype "PPC PReP Boot" --ondisk /dev/sda --size 8 + part None --fstype "PPC PReP Boot" --ondisk /dev/sda --size 8 # Uncomment this efi line for x86_64 servers #part /boot/efi --size 50 --ondisk /dev/sda --fstype efi part /boot --size 256 --fstype ext4 --ondisk /dev/sda @@ -61,7 +61,7 @@ Here is partition definition file example for RedHat LVM partition in IBM Power .. END_partition_definition_file_example_RedHat_LVM_for_IBM_Power_machines .. BEGIN_partition_definition_file_example_RedHat_RAID1_for_IBM_Power_machines -Partition definition file example for RedHat RAID1 please refer to `Configure RAID before Deploy OS `_ +Partition definition file example for RedHat RAID1 please refer to :doc:`Configure RAID before Deploy OS ` .. END_partition_definition_file_example_RedHat_RAID1_for_IBM_Power_machines .. BEGIN_partition_definition_file_example_SLES_Standard_Partitions_for_X86_64 @@ -97,8 +97,8 @@ Here is partition definition file example for SLES standard partition in X86_64 .. END_partition_definition_file_example_SLES_Standard_Partitions_for_X86_64 -.. BEGIN_partition_definition_file_example_SLES_LVM_for_X86_64 -Here is partition definition file example for SLES LVM partition in X86_64 machines +.. 
BEGIN_partition_definition_file_example_SLES_LVM_for_ppc64 +Here is partition definition file example for SLES LVM partition in P server :: /dev/sda @@ -209,7 +209,7 @@ Here is partition definition file example for SLES LVM partition in X86_64 machi all -.. END_partition_definition_file_example_SLES_LVM_for_X86_64 +.. END_partition_definition_file_example_SLES_LVM_for_ppc64 .. BEGIN_partition_definition_file_example_SLES_Standard_partition_for_ppc64 Here is partition definition file example for SLES standard partition in ppc64 machines @@ -273,37 +273,16 @@ Partition definition file example for SLES RAID1 please refer to `Configure RAID .. BEGIN_partition_definition_file_example_Ubuntu_Standard_partition_for_PPC64le Here is partition definition file example for Ubuntu standard partition in ppc64le machines :: - 8 1 32 prep - $primary{ } - $bootable{ } - method{ prep } . - - 256 256 512 ext3 - $primary{ } - method{ format } - format{ } - use_filesystem{ } - filesystem{ ext3 } - mountpoint{ /boot } . - - 64 512 300% linux-swap - method{ swap } - format{ } . - - 512 1024 4096 ext3 - $primary{ } - method{ format } - format{ } - use_filesystem{ } - filesystem{ ext4 } - mountpoint{ / } . - - 100 10000 1000000000 ext3 - method{ format } - format{ } - use_filesystem{ } - filesystem{ ext4 } - mountpoint{ /home } . + ubuntu-boot :: + 8 1 1 prep + $primary{ } $bootable{ } method{ prep } + . + 500 10000 1000000000 ext4 + method{ format } format{ } use_filesystem{ } filesystem{ ext4 } mountpoint{ / } + . + 2048 512 300% linux-swap + method{ swap } format{ } + . .. END_partition_definition_file_example_Ubuntu_Standard_partition_for_PPC64le From 52203292a1bfa8076633780897074e62a80e182c Mon Sep 17 00:00:00 2001 From: huweihua Date: Tue, 8 Sep 2015 23:13:01 -0400 Subject: [PATCH 15/19] modify some part depending on the comments --- .../common/deployment/cfg_partition.rst | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_partition.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_partition.rst index f36ef1e05..b682e813e 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_partition.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_partition.rst @@ -16,20 +16,19 @@ You could create a customized osimage partition file, say /install/custom/my-par The partition file must follow the partitioning syntax of the installer(e.g. kickstart for RedHat, AutoYaST for SLES, Preseed for Ubuntu). you could refer to the `Kickstart documentation `_ or `Autoyast documentation `_ or `Preseed documentation `_ write your own partitions layout. Meanwhile, RedHat and SuSE provides some tools that could help generate kickstart/autoyast templates, in which you could refer to the partition section for the partitions layout information: -#. RedHat: +* **[RedHat]** -- The file /root/anaconda-ks.cfg is a sample kickstart file created by RedHat installer during the installation process based on the options that you selected. -- system-config-kickstart is a tool with graphical interface for creating kickstart files + - The file /root/anaconda-ks.cfg is a sample kickstart file created by RedHat installer during the installation process based on the options that you selected. + - system-config-kickstart is a tool with graphical interface for creating kickstart files -#. 
SLES +* **[SLES]** -- Use yast2 autoyast in GUI or CLI mode to customize the installation options and create autoyast file -- Use yast2 clone_system to create autoyast configuration file /root/autoinst.xml to clone an existing system + - Use yast2 autoyast in GUI or CLI mode to customize the installation options and create autoyast file + - Use yast2 clone_system to create autoyast configuration file /root/autoinst.xml to clone an existing system -#. Ubuntu - -- For detailed information see the files partman-auto-recipe.txt and partman-auto-raid-recipe.txt included in the debian-installer package. Both files are also available from the debian-installer source repository. Note that the supported functionality may change between releases. +* **[Ubuntu]** + - For detailed information see the files partman-auto-recipe.txt and partman-auto-raid-recipe.txt included in the debian-installer package. Both files are also available from the debian-installer source repository. Note that the supported functionality may change between releases. .. END_partition_definition_file_content .. BEGIN_partition_definition_file_example_RedHat_Standard_Partitions_for_IBM_Power_machines From 2805011affde02a4778c8537d8fd68bb687206c0 Mon Sep 17 00:00:00 2001 From: huweihua Date: Thu, 10 Sep 2015 03:29:49 -0400 Subject: [PATCH 16/19] modify some parts depending on the comments --- docs/source/advanced/sysclone.rst | 98 +++++++++++++++---------------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/docs/source/advanced/sysclone.rst b/docs/source/advanced/sysclone.rst index 2ff518694..10112f06e 100644 --- a/docs/source/advanced/sysclone.rst +++ b/docs/source/advanced/sysclone.rst @@ -9,27 +9,27 @@ List of Supported Arch and OS +------------------+-------------+----------------+-------------+------------------------------------------------------+ | xCAT version | OS | Tested Version | ARCH | Feature | +==================+=============+================+=============+======================================================+ -| 2.8.2 and later | Centos | 6.3 5.9 | x86_64 | Basic clone node | +| 2.8.2 and later | CentOS | 6.3 5.9 | x86_64 | Basic clone node | + +-------------+----------------+-------------+------------------------------------------------------+ -| | redhat | 6.4 5.9 | x86_64 | Basic clone node | +| | RHEL | 6.4 5.9 | x86_64 | Basic clone node | +------------------+-------------+----------------+-------------+------------------------------------------------------+ -| 2.8.3 and later | sles | 11.3 10.4 | x86_64 | Basic clone node | +| 2.8.3 and later | SLES | 11.3 10.4 | x86_64 | Basic clone node | +------------------+-------------+----------------+-------------+------------------------------------------------------+ -| 2.8.5 and later | Centos | 6.3 | x86_64 | Add feature: update delta changes(has limitation) | +| 2.8.5 and later | CentOS | 6.3 | x86_64 | Add feature: update delta changes(has limitation) | + +-------------+----------------+-------------+------------------------------------------------------+ -| | redhat | 6.4 | x86_64 | Add feature: update delta changes(has limitation) | +| | RHEL | 6.4 | x86_64 | Add feature: update delta changes(has limitation) | + +-------------+----------------+-------------+------------------------------------------------------+ -| | sles | 11.3 | x86_64 | Add feature: update delta changes | +| | SLES | 11.3 | x86_64 | Add feature: update delta changes | + +-------------+----------------+-------------+------------------------------------------------------+ 
-| | sles | 10.x | x86_64 | Not support any more | +| | SLES | 10.x | x86_64 | Not support any more | +------------------+-------------+----------------+-------------+------------------------------------------------------+ -| 2.9 and later | redhat | 6.4 | ppc64 | Basic clone node/update delta changes/LVM | +| 2.9 and later | RHEL | 6.4 | ppc64 | Basic clone node/update delta changes/LVM | + +-------------+----------------+-------------+------------------------------------------------------+ -| | sles | 11.3 | ppc64 | Basic clone node/update delta changes | +| | SLES | 11.3 | ppc64 | Basic clone node/update delta changes | + +-------------+----------------+-------------+------------------------------------------------------+ -| | redhat | 7.0 | ppc64 | Basic clone node/update delta changes/LVM | +| | RHEL | 7.0 | ppc64 | Basic clone node/update delta changes/LVM | + +-------------+----------------+-------------+------------------------------------------------------+ -| | redhat | 6.4 7.0 | x86_64 | support LVM | +| | RHEL | 6.4 7.0 | x86_64 | support LVM | +------------------+-------------+----------------+-------------+------------------------------------------------------+ Using Sysclone to Install Nodes @@ -40,7 +40,7 @@ This document describes how to install and configure a template node (called gol Prepare the xCAT Management Node for Support Sysclone ````````````````````````````````````````````````````` -How to configure xCAT management node please refer to section [TODO] :ref:`Install Guides ` +How to configure xCAT management node please refer to section :ref:`Install Guides ` For support Sysclone, we need to install some extra rpms on management node and the golden client. @@ -65,23 +65,23 @@ For support Sysclone, we need to install some extra rpms on management node and Compilation failed in require at /usr/sbin/si_mkrsyncd_conf line 28. BEGIN failed--compilation aborted at /usr/sbin/si_mkrsyncd_conf line 28. -3. Do some preparation for install and configure golden client in management node. Copy the xcat-dep-XXX.tar.bz2 file to directory "/install/post/otherpkgs///xcat/" of the management node according your golden client's OS version and system architecture, then decompression it. For example: +3. Do some preparation for install and configure golden client in management node. Copy the xcat-dep-XXX.tar.bz2 file to directory ``/install/post/otherpkgs///xcat/`` of the management node according your golden client's OS version and system architecture, then decompression it. 
For example: -* **[Centos6.3 and x86_64]**:: +* **[CentOS6.3 and x86_64]**:: - mkdir -p /install/post/otherpkgs/centos6.3/x86_64/xcat - cp xcat-dep-*.tar.bz2 /install/post/otherpkgs/centos6.3/x86_64/xcat - cd /install/post/otherpkgs/centos6.3/x86_64/xcat + mkdir -p /install/post/otherpkgs/CentOS6.3/x86_64/xcat + cp xcat-dep-*.tar.bz2 /install/post/otherpkgs/CentOS6.3/x86_64/xcat + cd /install/post/otherpkgs/CentOS6.3/x86_64/xcat tar jxvf xcat-dep-*.tar.bz2 * **[SLES11.3 and x86_64]**:: - mkdir -p /install/post/otherpkgs/sles11.3/x86_64/xcat - cp xcat-dep-*.tar.bz2 /install/post/otherpkgs/sles11.3/x86_64/xcat - cd /install/post/otherpkgs/sles11.3/x86_64/xcat + mkdir -p /install/post/otherpkgs/SLES11.3/x86_64/xcat + cp xcat-dep-*.tar.bz2 /install/post/otherpkgs/SLES11.3/x86_64/xcat + cd /install/post/otherpkgs/SLES11.3/x86_64/xcat tar jxvf xcat-dep-*.tar.bz2 -* **[Redhat6.4 and ppc64 system]**:: +* **[RHEL6.4 and ppc64]**:: mkdir -p /install/post/otherpkgs/rhels6.4/ppc64/xcat cp xcat-dep-*.tar.bz2 /install/post/otherpkgs/rhels6.4/ppc64/xcat @@ -95,33 +95,33 @@ The Golden Client acts as a regular node for xCAT, just have some extra rpms to For information of how to install a regular node, please refer to section :ref:`Diskful Installation ` -For support clone, add 'otherpkglist' and 'otherpkgdir' attributes to the image definition which will be used to deploy golden client, then deploy golden client as normal. then the golden client will have extra rpms to support clone. If you have deployed your golden client already, using 'updatenode' command to push these extra rpms to golden client. Centos share the same pkglist file with redhat. For example: +For support clone, add 'otherpkglist' and 'otherpkgdir' attributes to the image definition which will be used to deploy golden client, then deploy golden client as normal. then the golden client will have extra rpms to support clone. If you have deployed your golden client already, using 'updatenode' command to push these extra rpms to golden client. CentOS share the same pkglist file with RHEL. 
For example: -* **[RH6.4 and x86_64 system]**:: +* **[RH6.4 and x86_64]**:: - chdef -t osimage -o otherpkglist=/opt/xcat/share/xcat/instal /rh/sysclone.rhels6.x86_64.otherpkgs.pkglist + chdef -t osimage -o otherpkglist=/opt/xcat/share/xcat/instal/rh/sysclone.rhels6.x86_64.otherpkgs.pkglist chdef -t osimage -o -p otherpkgdir=/install/post/otherpkgs/rhels6.4/x86_64 updatenode -S -* **[Centos6.3 and x86_64]**:: +* **[CentOS6.3 and x86_64]**:: - chdef -t osimage -o otherpkglist=/opt/xcat/share/xcat/install /rh/sysclone.rhels6.x86_64.otherpkgs.pkglist - chdef -t osimage -o -p otherpkgdir=/install/post/otherpkgs/centos6.3/x86_64 + chdef -t osimage -o otherpkglist=/opt/xcat/share/xcat/install/rh/sysclone.rhels6.x86_64.otherpkgs.pkglist + chdef -t osimage -o -p otherpkgdir=/install/post/otherpkgs/CentOS6.3/x86_64 updatenode -S -* **[SLES11.3 and x86_64 system]**:: +* **[SLES11.3 and x86_64]**:: - chdef -t osimage -o otherpkglist=/opt/xcat/share/xcat/install /sles/sysclone.sles11.x86_64.otherpkgs.pkglist - chdef -t osimage -o -p otherpkgdir=/install/post/otherpkgs/sles11.3/x86_64 + chdef -t osimage -o otherpkglist=/opt/xcat/share/xcat/install/sles/sysclone.sles11.x86_64.otherpkgs.pkglist + chdef -t osimage -o -p otherpkgdir=/install/post/otherpkgs/SLES11.3/x86_64 updatenode -S -* **[For RH6.3 and ppc64 system]**:: +* **[RH6.3 and ppc64]**:: - chdef -t osimage -o otherpkglist=/opt/xcat/share/xcat/install /rh/sysclone.rhels6.ppc64.otherpkgs.pkglist + chdef -t osimage -o otherpkglist=/opt/xcat/share/xcat/install/rh/sysclone.rhels6.ppc64.otherpkgs.pkglist chdef -t osimage -o -p otherpkgdir=/install/post/otherpkgs/rhels6.3/ppc64 updatenode -S -*[Note]: If you install systemimager RPMs on Centos 6.5 node by above steps, you maybe hit failure. this is a known issue because some defect of Centos6.5 itself. Please refer to known issue section for help.* +*[Note]: If you install systemimager RPMs on CentOS 6.5 node by above steps, you maybe hit failure. this is a known issue because some defect of CentOS6.5 itself. Please refer to known issue section for help.* Capture Image from Golden Client ```````````````````````````````` @@ -139,7 +139,7 @@ Install the target nodes with the image from the golden-client following below commands to install the target nodes with the image captured from golden client. -* **[x86_64 system]**:: +* **[x86_64]**:: nodeset osimage= rsetboot net @@ -156,7 +156,7 @@ Update Nodes Later On If, at a later time, you need to make changes to the golden client (install new rpms, change config files, etc.), you can capture the changes and push them to the already cloned nodes without need to restart cloned nodes. This process will only transfer the deltas, so it will be much faster than the original cloning. -**[Limitation]**: In xcat2.8.5, this feature has limitation in redhat and centos. when your delta changes related bootloader, it would encounter error. This issue will be fixed in xcat higher version. So up to now, in redhat and centos, this feature just update files not related bootloader. +**[Limitation]**: In xcat2.8.5, this feature has limitation in RHEL and CentOS. when your delta changes related bootloader, it would encounter error. This issue will be fixed in xcat higher version. So up to now, in RHEL and CentOS, this feature just update files not related bootloader. 
Update delta changes please follow below steps: @@ -177,9 +177,9 @@ From one of the nodes you want to update, test the update to see which files wil If it lists files and directories that you do not think should be updated, you need to add them to the exclude list in 3 places -* On the golden node: /etc/systemimager/updateclient.local.exclude -* On the mgmt node: /install/sysclone/images//etc/systemimager/updateclient.local.exclude -* On all of the nodes to be updated: /etc/systemimager/updateclient.local.exclude +* On the golden node: ``/etc/systemimager/updateclient.local.exclude`` +* On the mgmt node: ``/install/sysclone/images//etc/systemimager/updateclient.local.exclude`` +* On all of the nodes to be updated: ``/etc/systemimager/updateclient.local.exclude`` From the mgmt node, push the updates out to the other nodes:: @@ -187,7 +187,7 @@ From the mgmt node, push the updates out to the other nodes:: b) If you are running xCAT 2.8.5 or later: -you could push the updates out to the other nodes quickly by below command:: +You could push the updates out to the other nodes quickly by below command:: updatenode -S @@ -195,10 +195,10 @@ you could push the updates out to the other nodes quickly by below command:: Known Issue ----------- -Can not install systemimager RPMs in centos6.5 by yum +Can not install systemimager RPMs in CentOS6.5 by yum `````````````````````````````````````````````````````` -If you install systemimager RPMs on Centos 6.5 node by yum, you maybe hit failure because some defect of Centos6.5 itself. So please copy related RPMs to Centos 6.5 node and install them by hand. +If you install systemimager RPMs on CentOS 6.5 node by yum, you maybe hit failure because some defect of CentOS6.5 itself. So please copy related RPMs to CentOS 6.5 node and install them by hand. * **On management node**:: @@ -206,17 +206,17 @@ If you install systemimager RPMs on Centos 6.5 node by yum, you maybe hit failur [root@MN xcat-dep]# scp systemimager-client-4.3.0-0.1.noarch.rpm \ systemconfigurator-2.2.11-1.noarch.rpm \ systemimager-common-4.3.0-0.1.noarch.rpm \ - perl-AppConfig-1.52-4.noarch.rpm :/ + perl-AppConfig-1.52-4.noarch.rpm :/ * **On golden client**:: - [root@Centos6.5 node]# cd / - [root@Centos6.5 node]# rpm -ivh perl-AppConfig-1.52-4.noarch.rpm - [root@Centos6.5 node]# rpm -ivh systemconfigurator-2.2.11-1.noarch.rpm - [root@Centos6.5 node]# rpm -ivh systemimager-common-4.3.0-0.1.noarch.rpm - [root@Centos6.5 node]# rpm -ivh systemimager-client-4.3.0-0.1.noarch.rpm + [root@CentOS6.5 node]# cd / + [root@CentOS6.5 node]# rpm -ivh perl-AppConfig-1.52-4.noarch.rpm + [root@CentOS6.5 node]# rpm -ivh systemconfigurator-2.2.11-1.noarch.rpm + [root@CentOS6.5 node]# rpm -ivh systemimager-common-4.3.0-0.1.noarch.rpm + [root@CentOS6.5 node]# rpm -ivh systemimager-client-4.3.0-0.1.noarch.rpm -Kernel panic at times when install target node with rhels7.0 in power 7 server +Kernel panic at times when install target node with rhels7.0 in Power 7 server `````````````````````````````````````````````````````````````````````````````` -When you clone rhels7.0 image to target node which is power 7 server lpar, maybe you will hit Kernel panic problem at times after boot loader grub2 download kernel and initrd. This is an known issue but without resolve yet. up to now, we recommend you try again. \ No newline at end of file +When you clone rhels7.0 image to target node which is Power 7 server lpar, maybe you will hit Kernel panic problem at times after boot loader grub2 download kernel and initrd. 
This is an known issue but without resolve yet. up to now, we recommend you try again. \ No newline at end of file From fc6e6e1d5d0b7249003232fb2b089b1d337ad038 Mon Sep 17 00:00:00 2001 From: huweihua Date: Thu, 10 Sep 2015 03:32:02 -0400 Subject: [PATCH 17/19] modify some parts depending on the comments --- .../common/deployment/cfg_second_adapter.rst | 18 +++---- .../common/deployment/raid_cfg.rst | 52 +++++++++---------- 2 files changed, 35 insertions(+), 35 deletions(-) diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_second_adapter.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_second_adapter.rst index 283dcdf35..b07e632d3 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_second_adapter.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/cfg_second_adapter.rst @@ -1,9 +1,7 @@ -Configure Secondary Network Adapter -=================================== +Configure Additional Network Interfaces +======================================= -Introduction ------------- -The **nics** table and the **confignics** postscript can be used to automatically configure additional **ethernet** and **Infiniband** adapters on nodes as they are being deployed. ("Additional adapters" means adapters other than the primary adapter that the node is being installed/booted over.) +The **nics** table and the **confignics** postscript can be used to automatically configure additional network interfaces (mutltiple ethernets adapters, InfiniBand, etc) on the nodes as they are being deployed. The way the confignics postscript decides what IP address to give the secondary adapter is by checking the nics table, in which the nic configuration information is stored. @@ -15,7 +13,9 @@ Define configuration information for the Secondary Adapters in the nics table There are 3 ways to complete this operation. -**First way is to use command line input. below is a example** +1. Using command line + +Below is a example :: [root@ls21n01 ~]# mkdef cn1 groups=all nicips.eth1="11.1.89.7|12.1.89.7" nicnetworks.eth1="net11|net12" nictypes.eth1="Ethernet" 1 object definitions have been created or modified. @@ -23,9 +23,9 @@ There are 3 ways to complete this operation. [root@ls21n01 ~]# chdef cn1 nicips.eth2="13.1.89.7|14.1.89.7" nicnetworks.eth2="net13|net14" nictypes.eth2="Ethernet" 1 object definitions have been created or modified. -**Second way is to use stanza file** +2. Using stanza file -prepare your stanza file .stanza. the content of .stanza like below: +Prepare your stanza file .stanza. the content of .stanza like below: :: # cn1: @@ -45,7 +45,7 @@ define configuration information by .stanza :: cat .stanza | mkdef -z -**Third way is to use 'tabedit' to edit the nics table directly** +3. Using 'tabedit' to edit the nics table The 'tabedit' command opens the specified table in the user's editor(such as VI), allows user to edit any text, and then writes changes back to the database table. But it's tedious and error prone, so don't recommended this way. if using this way, notices the **nicips**, **nictypes** and **nicnetworks** attributes are required. 
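Whichever of the three methods is used, it is worth verifying the resulting definition before deploying the node. Below is a minimal check using the example node ``cn1`` and the attribute values shown above; the exact output layout may differ slightly between xCAT releases.
::

    lsdef cn1 -i nicips,nictypes,nicnetworks

    Object name: cn1
        nicips.eth1=11.1.89.7|12.1.89.7
        nicips.eth2=13.1.89.7|14.1.89.7
        nicnetworks.eth1=net11|net12
        nicnetworks.eth2=net13|net14
        nictypes.eth1=Ethernet
        nictypes.eth2=Ethernet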
diff --git a/docs/source/guides/admin-guides/manage_clusters/common/deployment/raid_cfg.rst b/docs/source/guides/admin-guides/manage_clusters/common/deployment/raid_cfg.rst index cf3ad7af7..c34a7e8ca 100644 --- a/docs/source/guides/admin-guides/manage_clusters/common/deployment/raid_cfg.rst +++ b/docs/source/guides/admin-guides/manage_clusters/common/deployment/raid_cfg.rst @@ -8,18 +8,18 @@ This section describes how to use xCAT to deploy diskful nodes with RAID1 setup, All the examples in this section are based on three configuration scenarios: -- RHEL6 on a system p machine with two SCSI disks sda and sdb +1. RHEL6 on a system p machine with two SCSI disks sda and sdb -- RHEL6 on a system p machine with two SAS disks and multipath configuration. +2. RHEL6 on a system p machine with two SAS disks and multipath configuration. -- SLES 11 SP1 on a system p machine with two SCSI disks sda and sdb +3. SLES 11 SP1 on a system p machine with two SCSI disks sda and sdb If you are not using the configuration scenarios listed above, you may need to modify some of the steps in this documentation to make it work in your environment. Deploy Diskful Nodes with RAID1 Setup on RedHat ----------------------------------------------- -xCAT provides two sample kickstart template files with the RAID1 settings, /opt/xcat/share/xcat/install/rh/service.raid1.rhel6.ppc64.tmpl is for the configuration scenario #1 listed above and /opt/xcat/share/xcat/install/rh/service.raid1.multipath.rhel6.ppc64.tmpl is for the configuration scenario #2 listed above. You can customize the template file and put it under /install/custom/install// if the default one does not match your requirements. +xCAT provides two sample kickstart template files with the RAID1 settings, ``/opt/xcat/share/xcat/install/rh/service.raid1.rhel6.ppc64.tmpl`` is for the configuration scenario **1** listed above and ``/opt/xcat/share/xcat/install/rh/service.raid1.multipath.rhel6.ppc64.tmpl`` is for the configuration scenario **2** listed above. You can customize the template file and put it under ``/install/custom/install//`` if the default one does not match your requirements. Here is the RAID1 partitioning section in service.raid1.rhel6.ppc64.tmpl: :: @@ -57,11 +57,11 @@ And here is the RAID1 partitioning section in service.raid1.multipath.rhel6.ppc6 part raid.22 --size 1 --fstype ext4 --grow --ondisk mpathb raid / --level 1 --device md2 raid.21 raid.22 -The samples above created one PReP partition, one 200MB /boot partition and one / partition on sda/sda and mpatha/mpathb. If you want to use different partitioning scheme in your cluster, modify this RAID1 section in the kickstart template file accordingly. +The samples above created one PReP partition, one 200MB ``/boot`` partition and one ``/`` partition on ``sda/sdb`` and ``mpatha/mpathb``. If you want to use different partitioning scheme in your cluster, modify this RAID1 section in the kickstart template file accordingly. After the diskful nodes are up and running, you can check the RAID1 settings with the following commands: -Mount command shows the /dev/mdx devices are mounted to various file systems, the /dev/mdx indicates that the RAID is being used on this node. +Mount command shows the ``/dev/mdx`` devices are mounted to various file systems, the ``/dev/mdx`` indicates that the RAID is being used on this node. 
:: [root@server ~]# mount /dev/md2 on / type ext4 (rw) @@ -72,7 +72,7 @@ Mount command shows the /dev/mdx devices are mounted to various file systems, th /dev/md0 on /boot type ext4 (rw) none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) -The file /proc/mdstat includes the RAID devices status on the system, here is an example of /proc/mdstat in the non-multipath environment: +The file ``/proc/mdstat`` contains the RAID device status on the system; here is an example of ``/proc/mdstat`` in the non-multipath environment: :: [root@server ~]# cat /proc/mdstat Personalities : [raid1] @@ -88,7 +88,7 @@ The file /proc/mdstat includes the RAID devices status on the system, here is an unused devices: -On the system with multipath configuration, the /proc/mdstat looks like: +On the system with multipath configuration, the ``/proc/mdstat`` looks like: :: [root@server ~]# cat /proc/mdstat Personalities : [raid1] @@ -111,7 +111,7 @@ The command mdadm can query the detailed configuration for the RAID partitions: Deploy Diskful Nodes with RAID1 Setup on SLES --------------------------------------------- -xCAT provides one sample autoyast template files with the RAID1 settings /opt/xcat/share/xcat/install/sles/service.raid1.sles11.tmpl. You can customize the template file and put it under /install/custom/install// if the default one does not match your requirements. +xCAT provides one sample autoyast template file with the RAID1 settings, ``/opt/xcat/share/xcat/install/sles/service.raid1.sles11.tmpl``. You can customize the template file and put it under ``/install/custom/install//`` if the default one does not match your requirements. Here is the RAID1 partitioning section in service.raid1.sles11.tmpl: :: @@ -206,9 +206,9 @@ Here is the RAID1 partitioning section in service.raid1.sles11.tmpl: -The samples above created one 24MB PReP partition on each disk, one 2GB mirroed swap partition and one mirroed / partition uses all the disk space. If you want to use different partitioning scheme in your cluster, modify this RAID1 section in the autoyast template file accordingly. +The samples above created one 24MB PReP partition on each disk, one 2GB mirrored swap partition and one mirrored ``/`` partition that uses all the remaining disk space. If you want to use a different partitioning scheme in your cluster, modify this RAID1 section in the autoyast template file accordingly. -Since the PReP partition can not be mirroed between the two disks, some additional postinstall commands should be run to make the second disk bootable, here the the commands needed to make the second disk bootable: +Since the PReP partition can not be mirrored between the two disks, some additional postinstall commands should be run to make the second disk bootable; here are the commands needed to make the second disk bootable: :: # Set the second disk to be bootable for RAID1 setup parted -s /dev/sdb mkfs 1 fat16 @@ -217,11 +217,11 @@ Since the PReP partition can not be mirroed between the two disks, some addition dd if=/dev/sda1 of=/dev/sdb1 bootlist -m normal sda sdb -The procedure listed above has been added to the file /opt/xcat/share/xcat/install/scripts/post.sles11.raid1 to make it be automated. The autoyast template file service.raid1.sles11.tmpl will include the content of post.sles11.raid1, so no manual steps are needed here. +The procedure listed above has been added to the file ``/opt/xcat/share/xcat/install/scripts/post.sles11.raid1`` to automate it.
The autoyast template file service.raid1.sles11.tmpl will include the content of post.sles11.raid1, so no manual steps are needed here. After the diskful nodes are up and running, you can check the RAID1 settings with the following commands: -Mount command shows the /dev/mdx devices are mounted to various file systems, the /dev/mdx indicates that the RAID is being used on this node. +The ``mount`` command shows the ``/dev/mdx`` devices are mounted to various file systems; the presence of ``/dev/mdx`` devices indicates that RAID is being used on this node. :: server:~ # mount /dev/md1 on / type reiserfs (rw) @@ -232,7 +232,7 @@ Mount command shows the /dev/mdx devices are mounted to various file systems, th tmpfs on /dev/shm type tmpfs (rw,mode=1777) devpts on /dev/pts type devpts (rw,mode=0620,gid=5) -The file /proc/mdstat includes the RAID devices status on the system, here is an example of /proc/mdstat: +The file ``/proc/mdstat`` contains the RAID device status on the system; here is an example of ``/proc/mdstat``: :: server:~ # cat /proc/mdstat Personalities : [raid1] [raid0] [raid10] [raid6] [raid5] [raid4] @@ -255,7 +255,7 @@ Disk Replacement Procedure If any one disk fails in the RAID1 array, do not panic. Follow the procedure listed below to replace the failed disk and you will be fine. -Faulty disks should appear marked with an (F) if you look at /proc/mdstat: +Faulty disks should appear marked with an (F) if you look at ``/proc/mdstat``: :: [root@server ~]# cat /proc/mdstat Personalities : [raid1] @@ -276,11 +276,11 @@ We can see that the first disk is broken because all the RAID partitions on this Remove the failed disk from RAID array --------------------------------------- -mdadm is the command that can be used to query and manage the RAID arrays on Linux. To remove the failed disk from RAID array, use the command: +``mdadm`` is the command that can be used to query and manage the RAID arrays on Linux. To remove the failed disk from the RAID array, use the command: :: mdadm --manage /dev/mdx --remove /dev/xxx -Where the /dev/mdx are the RAID partitions listed in /proc/mdstat file, such as md0, md1 and md2; the /dev/xxx are the backend devices like dm-11, dm-8 and dm-9 in the multipath configuration and sda5, sda3 and sda2 in the non-multipath configuration. +Where ``/dev/mdx`` is one of the RAID partitions listed in the ``/proc/mdstat`` file, such as md0, md1 and md2; ``/dev/xxx`` is the backend device, such as dm-11, dm-8 and dm-9 in the multipath configuration and sda5, sda3 and sda2 in the non-multipath configuration. Here is the example of removing failed disk from the RAID1 array in the non-multipath configuration: :: @@ -294,7 +294,7 @@ Here is the example of removing failed disk from the RAID1 array in the multipat mdadm --manage /dev/md1 --remove /dev/dm-8 mdadm --manage /dev/md2 --remove /dev/dm-11 -After the failed disk is removed from the RAID1 array, the partitions on the failed disk will be removed from /proc/mdstat and the "mdadm --detail" output also. +After the failed disk is removed from the RAID1 array, the partitions on the failed disk will be removed from ``/proc/mdstat`` and from the "mdadm --detail" output as well. :: [root@server ~]# cat /proc/mdstat Personalities : [raid1] @@ -363,7 +363,7 @@ You can run :: fdisk -l -to check if both hard drives have the same partitioning now. +to check that both hard drives now have the same partitioning.
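The partition-creation steps themselves fall outside the hunks shown above; as a sketch only (assuming the non-multipath scenario where ``sda`` is the surviving disk and ``sdb`` is the blank replacement disk with an MBR/DOS label), one common way to clone the partition table before running the ``fdisk -l`` check is: ::

    # dump the partition table of the healthy disk ...
    sfdisk -d /dev/sda > /tmp/sda.layout
    # ... and replay it onto the replacement disk
    sfdisk /dev/sdb < /tmp/sda.layout

Adjust the device names for your environment; in the multipath configuration the ``mpatha``/``mpathb`` devices would be used instead.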
Add the new disk into the RAID1 array ------------------------------------- After the partitions are created on the new disk, you can use command :: mdadm --manage /dev/mdx --add /dev/xxx -to add the new disk to the RAID1 array. Where the /dev/mdx are the RAID partitions like md0, md1 and md2; the /dev/xxx are the backend devices like dm-11, dm-8 and dm-9 in the multipath configuration and sda5, sda3 and sda2 in the non-multipath configuration. +to add the new disk to the RAID1 array, where ``/dev/mdx`` is one of the RAID partitions like md0, md1 and md2 and ``/dev/xxx`` is the backend device, like dm-11, dm-8 and dm-9 in the multipath configuration and sda5, sda3 and sda2 in the non-multipath configuration. Here is an example for the non-multipath configuration: :: @@ -388,7 +388,7 @@ Here is an example for the multipath configuration: All done! You can have a cup of coffee to watch the fully automatic reconstruction running... -While the RAID1 array is reconstructing, you will see some progress information in /proc/mdstat: +While the RAID1 array is reconstructing, you will see some progress information in ``/proc/mdstat``: :: [root@server raid1]# cat /proc/mdstat Personalities : [raid1] @@ -407,7 +407,7 @@ While the RAID1 array is reconstructing, you will see some progress information unused devices: -After the reconstruction is done, the /proc/mdstat becomes like: +After the reconstruction is done, ``/proc/mdstat`` looks like this: :: [root@server ~]# cat /proc/mdstat Personalities : [raid1] @@ -428,13 +428,13 @@ Make the new disk bootable If the new disk does not have a PReP partition, or the PReP partition has some problem, it will not be bootable. Here is an example of how to make the new disk bootable; you may need to substitute the device names with your own values. -**RedHat:** -:: +* **[RHEL]**:: + mkofboot .b /dev/sda bootlist -m normal sda sdb -**SLES:** -:: +* **[SLES]**:: + parted -s /dev/sda mkfs 1 fat16 parted /dev/sda set 1 type 6 parted /dev/sda set 1 boot on From 5326ece7cd5ce0f0234fe265ee993013593eeddd Mon Sep 17 00:00:00 2001 From: penguhyang Date: Mon, 21 Sep 2015 04:30:27 -0400 Subject: [PATCH 18/19] if node is -h then exit 0 else exit 1 --- xCAT-server/sbin/runcmdinstaller | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/xCAT-server/sbin/runcmdinstaller b/xCAT-server/sbin/runcmdinstaller index d194d3c00..438db3f31 100755 --- a/xCAT-server/sbin/runcmdinstaller +++ b/xCAT-server/sbin/runcmdinstaller @@ -7,11 +7,16 @@ awk -v argc="$#" -v node="$1" -v cmd="$2" 'BEGIN { port = 3001 action = "sh" -if( node=="-h" || argc !=2 || ! node || ! cmd){ +if(node=="-h"){ print "Usage:\n\n runcmdinstaller \"\"\n" print " make sure all the commands are quoted by \"\"\n"; exit 0; } +if(argc !=2 || !
cmd){ + print "Usage:\n\n runcmdinstaller \"\"\n" + print " make sure all the commands are quoted by \"\"\n"; + exit 1; +} ns = "/inet/tcp/0/" node "/" port From c6c06d54d58b50b6770b53360ee83a62e6c4dfa1 Mon Sep 17 00:00:00 2001 From: wangxiaopeng Date: Sun, 20 Sep 2015 07:16:47 -0400 Subject: [PATCH 19/19] New doc: Add Overview part --- docs/source/index.rst | 19 +- docs/source/overview/Xcat-arch.png | Bin 0 -> 46599 bytes docs/source/overview/architecture.rst | 4 - docs/source/overview/features.rst | 4 - docs/source/overview/index.rst | 236 +++++++++++++++++- .../source/overview/setup_cluster_process.rst | 2 - docs/source/overview/support_list.rst | 2 - 7 files changed, 240 insertions(+), 27 deletions(-) create mode 100644 docs/source/overview/Xcat-arch.png delete mode 100644 docs/source/overview/architecture.rst delete mode 100644 docs/source/overview/features.rst delete mode 100644 docs/source/overview/setup_cluster_process.rst delete mode 100644 docs/source/overview/support_list.rst diff --git a/docs/source/index.rst b/docs/source/index.rst index b2fda3aaf..780c34f7b 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,16 +1,21 @@ xCAT stands for E\ **x**\ treme **C**\ loud/\ **C**\ luster **A**\ dministration **T**\ oolkit. -xCAT offers complete management of clouds, clusters (HPC), grids, datacenters, +xCAT offers complete management of clouds, clusters (HPC), grids, datacenters, renderfarms, online gaming infrastructure, and whatever tomorrow's next buzzword may be. -xCAT enables the administrator to: +**xCAT enables the administrator to:** + #. Discover the hardware servers + #. Execute remote system management against the discovered server + #. Provision Operating Systems on physical (Bare-metal) or virtual machines + #. Provision machines in Diskful and Diskless + #. Install and configure user applications + #. Parallel system management + #. Integrate xCAT in Cloud - #. provision operating systems on physical (“bare metal”) or virtual machines. - #. provision machines using scripted install, stateless, statelite, iSCSI, or cloning. - #. remotely manage systems (lights out management, remote console support, distributed shell support). - #. quickly configure and control management nodes services: dns, http, dhcp, tftp, nfs, etc. +**xCAT** is an open source project hosted on `GitHub/xcat2 `_. You can get the **source code**, submit **bugs/questions/requirements** and contribute **code and doc** here. -**xCAT** is an open source project hosted on `SourceForge `_ and currently mirrored on `GitHub `_. (We hope to be natively hosted on GitHub in the near future) +**xCAT**'s landing page is http://xcat.org. You can get the **download packages** and **release notes** here. Enjoy!
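As a quick way to see the effect of the ``runcmdinstaller`` exit-code change in PATCH 18 above (a sketch, assuming the script is invoked directly from a shell on the management node), the two usage paths can be distinguished by their exit status: ::

    # asking for help prints the usage text and exits 0 (success)
    runcmdinstaller -h; echo $?     # expected: 0
    # missing or malformed arguments print the same usage text but exit 1 (error)
    runcmdinstaller; echo $?        # expected: 1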
diff --git a/docs/source/overview/Xcat-arch.png b/docs/source/overview/Xcat-arch.png new file mode 100644 index 0000000000000000000000000000000000000000..cef22a0b2680c030d94370eec8bac2206c94219e GIT binary patch literal 46599 zcmdSBbzIb4*DgLLN(u-lDIg%Nq%_LV-3`(W(p@SoAl)gQ(j7{7gS5oZ!_YC*+1&T@ zob%l0yr1{{{`{TqXJmBv#@=hMz1Fp^b&WxCGNKr$gs2b*1Vda5ssMr9a{+${9^D0> zte1RQMpk_3NiP74X&7yh`?seHPiF{?{br7>%65jTNZ zIN?=X{Cy_FQFYFAzE&PvPD#-JQ8&I(_>;)X>spRbhUbqTFCBUvADi3l)}8Wm>O`)m zx)0Vd)M?3p2SOkPof8g_Jwjb=mxyTk5r?M}YIq;k4132Mo~FBA?BW^YiL;aVb>6St zbf4a1CwTzziG82>xL(j=Y(J+aJ;~M?r7365z%ne0%KMDJ_N&R)qH8|Udi1>$w zN>5a|9c}E7h>~GP)YLe)=@$zd8?O~87Y2&Gcv86vRiwkFxsQ;nccoya5>e3TCrD7M zJ+D1V`19NIenq_uhqLP3A)PKO()rc(4Z+y?HgFrJ6+PsO}ln*ag{l|w)k8Sq>7JBgN-mijfx$kO!M{1f8iv$C_bYd@0H z(G}$2pF+;WY~Z0nAuZmVc)p9t6dLx{q<<>IVl=b?s zXQYEq%Pss=w=yA7dBI@5IUoh`f`YEEPrMoLX&ItnZgpb;VPka}d#JENac?TA!J)_? zGrpZ6p^9$41n3!VSmS)WwYY`EVyS}Xx6OKKR zRO;&L6D5xDnJOP2NQ9W7p&^^o&hK2=G#1Cpt1C|rkN7G5viRdta22+^xOh)69(`kD zBRV?znMX75q0`-Y0tU5jsi`Hu(c=1yR34+D6(UeFsj?yu{#3CZDi}1h9JX~I$6Xqi z9uPMCwpmkcn@T1sKgeqxRvNp*!M^P@uSRUGq7-PSXFQI9A3z@`USVK;qe>TNA`%c- zL_8j6gwN;hW`6!hv&B>EfMP6vDn2YKwu%TDn;PNjFg$61IDO?6Taa^Qo?K@^GZcYv z3-g$AUJH)cX5LJi<*&1?Ty+&06ODY*I8RR6}Z7HdA7*2oZxTs`pz1!-053$fx_)1(#YOdCH z<>zOVQk|x)wY40r^UhGBt}v1)3Eja=yPe-<{r&x5O`4sUD~t!YJUVr`%BvEjr#v^_>=(BgsdHfxeI!BD?Q z-P#6ND!5#c<-1V)8a+j?CsIu7?AL-#U?N3?MEg$*qs<|>c4 z&?z^39A$(=BCTgCdwP4#2NKy^u1+*XL^^k-D{SU!Gqku^S?w>+_JJdt0k{7204+;l z69hl&#pZeN244OF0YeF_0fA>8*Bv1Q3Cg^Dd{j6+Bi}`Xx(5e$yCW#UIfMQEU&F%q zT@SM4URfGmYEYf4Ji)FaVG$kkHN_?x(ivL*mD=Gy<913#Q74snwNbs^{lE{un3YUN zRjKc5{W~ZJiQ5|+3t7)+EM^^7LIRyA2>L#6W7CyocBit6o7%)*Vzre52(zLtB1Glm zcy%*u-Nn4I)o^9U#+lMr^a+7qjF}ZV6O`<1kE4s?XPKJ1YJlrzWxd}0waZE`f~jNs zyWEO^0(G&9ZtVzReU)2D_9jhCCqChbZ3i8@4lBNM?IY#AsSByA{_;>V?8nj!7SlE! 
zdpaBe>od;fsS$O>&S~xRzA3pLz02d5aGE#*Lw@p1d>48MM6hZr&U9^koj&!Yp_%$D z=Y<6x0Rh?Dk3Ozf7l;<;t?{71Y7i)F7Mqusm%n}c79al%3kyrwla;kXyTNI5d;0<` z&O##>3rl1mIHTTidkqH5Hr+HVQ@g&t4z4m}Nk~oA5f=|qWAHjy4a&&S3&14dcHZSR z=tPHoxV}7;O^_0S4h{~6a=pUnl@3}${nA-H&rTAEU$!~C{c|KVL7rSH^PG3TIqTN< zWjzlkgH*9%2-XiVz6J%>HCeArhirXAzTVx{)RxzovEG}}J` z>qwiFmzmX{7nN?XEl9-0^+qakp-Qo7Q6J5`2UpySc`#mAI_byqb03-*D~Th*V4KaqCJv?YS@egI33zGg=nOWh9Q{zj$HU7`Hg(#aQ`gl^bG0%@mM1I$@2x`k4Hyr_ zzcIKtfrE>ZcwJerZQmw`k?;(H5BHZlh%p25IV@CQvjYQRSy|t5+3Ftzb{ANwa>RA9 zso}_j0D&p{VQJUm!-xh`oKeSI`6EUO8c@7-8hFll+5a zj~d^x_?i=Ke^m4VoKL695^VH-=CF~0n#t4v>b zNB)+DQJ<(%kFeGC%12apHA>&b_Bu0{r{(0v)qq zxN%O&wMrABrFc6H=#JZ*>eMl7xgEl3b0kTIGI{cVj z-bXAJ#i(-hV`y*;56|=zXsb31ekQJO`YFYw)4&c;#S{KF2D7<#wzXr?C*p-t5HBPKR61 zI&GvJ{}nBz>Krdpr6f((+K7%2p7sSoTWyUT)$TAMjxq}&sGoYzM5o*_VQcHJlACf8 zXmk>>C*n^%H<|RIg*I!YZ8d`BtE<)a#Cm_+cWX;^-m^kVW`aIgP8hslsCW(cN#e|t zh+^kO+ayGmuvYii`iS$r&J}u;f#fECFVDuA*cEd;qg+Fg`4@GU8=2NI_@$pTKnaPcDgfdk67TN&;Sl1CpWye)D{{VIyO2wm?l8>{CPq|gnS%ythM#S z!D>%~GlDvkRxzgtWRAW*Nd|mTQPDJhPwuyG-%?ZO?8?)Y3k5;Tjr#CyY;2g~W}Dod zDNrBX9vjmYxhW|m;a&8!w0h=r^^Q0rnl?=xjZOCJGOW^UBn{!+E*}|B<_1gbo5vg& zG}=2Wra(F|7V+CV3Z?4}#`}&ao;`K3yApG?^)hyNgso1GJa1w;HiO_^I3-|}5@ZRe z%<-*17M7A}n{YQ`dX1?Y9!S?KL1*<&L8VA-#zVb&-yY7%wKRHN*+8Iqf&P+$QZp`d zC)Zd;hnTb_r|JwTGI+fbU#24ePV<+ucZv)AW0FsTV}Cs^6UmFZ#`RAacO!!A&gbu0 z0Klm#!v+cylC2cetCS7ye7LOyYmB>YKfNX*`A(^jEtFCeo&A&K$(lK>nXH~1+Ls; zhaF3gA2KZ3gF1)2#p`Ti2U8Ql@8-X@{EGlsDXk;G9o?O(a4-n1#oMKgzR>k!q&*irewtUi8d^&L?Drc{elT0ocxZsttNJtV8$( zPvY8BXM1o7>2_OGmnXfqZrASeFeyl=LE?G%<-~KgV!nDbbm=rLfn_dz-^}n2!s@pD z0xj4fMvF$KZ32(rFUsvc?vtIK_5M8SmMrEzIdxe{`Ff%HUBb7?CS}gR zyv5G+g2iD!);jAXEs*|Ty zg!Uex$hwX3F*2~B9|h%9kIm@ruSJ?xvx%`KJ;RX(j`V|_}GBwCxBoTu^DxV-tKQX;}+E>X+V&jJN zz)u^yOeMb$IG)GAL&1^vVAt!G^zqSnF#HK*sO>`$RAjaLA$k zdkA76R0FWj_|5eHuQ&KvY|w;8z_hxyR>1xg%ryis4T_42(xP`Ddv1VP(}{EQ^6~<# z&Q{n%2t8; zpgp{IJOBUAa5?_x7Z(+34EQ0(prSYD#=kY7vVv-+>z~#;v60=~VzBI~Qe9OYkmkhM z{p7^~4b}^gQ;F6&yk@bvy8M6~orGJ-HG+aitHrBn=FtR^U%7&COM<^78Pw1mWu|I6^~3 z!%0cV$aJd+R=_wDA^~SO*XUXSGW7e5;f@YioURvnW_vr-7>$~mS}{k;^-Z9y<;I03JxKm+u7cd23>S?bdh>Rj$|Cu-_+2^i0x#NIw;pwOK=GY zINgp+*ETxCNRqyNlY{5<_V!Me>Mj;*avCd^l$1!uGJpcUKr;Du8XB2ivJcH6XlNla zDcmFiULM<%#bn->wyv({fZ`N^Mo~+1dmjY`>Atte!hDL8ugUkN%`1m*pkJGEjzLuM79%g1le*$YL*L_V*O<%!p(a}fe>uKT7 zS^e9Vl!^~8&kwz>kbE9za`KUpk>&2EyWkpkP)y;{$S4udAFdB2pn~)XR?FYl_g%I$ zY;6tDg?j+P1V^^Fw}%pOlJdIjZA}zfTU&>6-8zX6_1M#z!A0*IPYMZXX>jM;lM_&m zS^Q!cwCkgLSfr(;0oP`?F-)$iI+c}`l~{cV5cFUQkIh139as*j9xQ>&7ZGBUCEA6* zuK@@EbzzVxFbR*-&Ii&>%FKM@L5mLTkWkTDK+o$?2(y&Lh9~Yc#%r z`uh5jQBj4HW##2|&d#M~iv0ZiQp^gZ;i_6%{ZB=Nh1n0(Zhbyc1eYptELT>wQg>#0 zI+RSH*_;GFL>X?8Af+;9416Skp!bRzk4K^CRa_s@vzkc2%bsqI1q1@O=$pGz(pNfi#glrH4a8UgyMZSKZesZM3K9P80X5S7LNADs+pWo@sJF&CJY9<#W$5ZJ?syPiryg!py1J0CxuVI#})q zSszMcb?0PZ0cMnxmM*T$N5LWm?Gyx9&;DV@2g0R_x9$L+g4PUfcTZ1qy?kjno+nQc z(LFr;5ro-ZR%w+KyYf4C0s6mt*E}Jq!ikiWbYk$0p|v$BAz{7S@fHXjAy5iXHrUt* z7<7IG<<{#cQ=Evx!fg{kBNT=j!vVUm;mqK{?-j z)(qitXhcF`Ip4v3282TX1+MZoO59lH zTDHF7*F4r@C;|F4@rtvlb8j`iMRYG60E|6{l(pblm8PI$9t`I$b*UvOJCo34v)dGpVrbr zP_>l5Wxip`l(p&$tAhjHe`tK@sYMTrWzElan`5QMpA{Yl9k>2HTjO={7V!F@KlNOL zC%A|hFb3EW`JV$qO=LT^`yGegDYFiXAs|&dN3OarrZs)3POZ#~0IYsX&Vglh; z{*hX#QBUOiNEP4Tym@T{TNcofAN6YY5H?2xCYRhhe8`xB=#SZqP%SqQnn#gw-r;}m zD*sL-GeHprQ&w)QexKm9UH5lX5)NfdAf6vg_`GVTK8dohocys@*e6cYXBP!uIFUp=RJ{ZZ$=Qif3+0Y(#{CNO9IrHtAqWlXt4V> zxQvn5It6S+TE<4?7;kfT)?wvPT8H2Km3#4l0;T5EWu;4gR^yB#AfjG7?rmKBw2|Qx zQFnK!aX5wTot2J%P_BsWp2>f~=euf`{VX_G*O2Y{Ht$wq;7FT}SXv?Z1tB>;&?XMkp~-5v(fYiM^NZXre0+x zhYgmZGgoBPH-q3vumM&_Q71@1!R1rkT5NAx2k2_PI@@vkqhBf{7XT^MZQCtVmfRdH 
zbUXfZ>%@ny?sxLYwhb)^G?LRS=@f=}a`84b=7ytIMBe2W5QGA@Lf|dZeh;gHq#hlQ zV2xVqJgIL=aiRJDU@(B^yM?)(_8e1kaxe~x*IMXLQD4ZLnzujjJi09iwZGs?n`Ru0 z;k)yhXaJ}W9g81Ckc)?&W(`I{ED#z>sbr)Ey!J8M4CYJ`tUJE zj#pPuUt=d@k7Q8Y@dZuU`i3?~+eQe8P!gcOmKO@BiNo(LaF_1acfD z?^dOiXj8ZtFvOv zC)Y0ul{4wYU3kVOa`L9j>IzfVFUcyQ|H!rwEO+2H(_a=`N#?4{*ECMP8ako_%==_3s4X-)E z;xp(ow~*-TI5*oD=ahZ9$j`11cU_s>O?*rONjC!I#2y)bNcQHFbkcrp9@j7r)qNE@ z+VJ0}R{i<#xXFR%C(=)7uPjsw%$C;dP1fUNt%_RW(&_Oa8;>x<7mZG) zK!EqgCpcld)M8{8jy-|4FPW>>Jb^&wKSObS0t4fD)=h^kuV*k*rJ?yt>oI8by}^>H zCege@ulgkOhsgrKW2=bM{i5JJ$}b0i6{g z6%vCPc0g-;g4Fl~TZ7FJMHVzr#LXh?GC<%3>5hsbK}jKg|K(agf07-=?&Lm zxHGMUa7s z!>-fJPu0VHH20{$>2&li*3;FJH`(4QTMskxXbbZ|E8fFy zFd6g<|1IA8=V=+YaYOL<-#7-Wz>8mb_TV)3|Bf1?_$yq>-TTKN4sWTkJktMoBDhUD zK3IdmM>T^0KPlZ3#o)+$0M4P1vH|l*fA}B7+7s-=TbAvgr~N&ei4}8hkS}f;H!akt zBP_UsRn-VTvXR_{2-^Dr7k;G8R^?woD|<`w`1JmnsCgP`+}bfV=&3(jeUa?<4*z*!pad|1XMB@QeI97>IeRa*qmX2TQ&b zmFDPc@^Gp@6jQeYmAfD*Jj*@)MK?QGYVUVLHlrF z1^!jV{F^osGp@dGDnqZw!~EG9wY?cn&2)qnI~AFA(O*}v{ZXPrm&LVMS})>nn|b!^ zF&oK!hz}AB{{=+~p`*^L*!37Q8(sYQ4P)R@jw9oz)}LZA&^0)|xq|lAaEUkvXk41p zL5^wryqz3ZE<6;xMP}Drkmzjxv_hnqQs@(_c91uu` za7Dy{;tCM4d^{nuFx)QCB1NvRgG}$3cv;px`p(o`S&<#CYPC9W`qA81ex0uUr5fdw z_PE0fuv&UzY(!Md>`W{lo9ZOHBxqHUSM(>%pZP?a`x>R29m{9TXG&-86>a8ehW#y} z8)!5x`^}pnW@>J=yA1zr5DnyE_#xU=#&OfS?&xWSjS<>WmX=L!SV$H*d4)?}zOA#I zuP9T&_M^YGQit1n%R@^mZr*B^@=Je|<959Q>z3#TRXjGtiv-R0=5FYiub}66OL)$I zAMPb$5M<{)pdFDar!+QsFt#k~&96nl1bXDla~5BvsOkH~JP4gi-fPXU1yPp(l3Wlr zo@5%kYU~f!NburRqmI(3f-iGg7_rB!9&9Mi>JRO_@=7)Spf{y=zf-<5FMl|!b?l^7IsXOW4H2-#srHM*^an`pLqX%3dWa}6Z@LYr!>OwFaPg@gF zM7=07fv7Cuw*TOY6e%-tej*Q|yY0F&c7&h%I%p6`Ok5IJO}bp(8PlW-|pGeB@QL}?Cb}RVLh)@{S15bgo0Z?NIXJ~22 z$EOtKoK>Ys(FJigbM4P8Ehj7CCl%TlTTPU>hr4dG?&^YS#js_%a0V+aSL@~|juLH@ zOm+^xXYIgTqh^LA2+bZyc=(#^bJ!3GsbzjCh;*5#SXy1ir|Vfjgp%NeV2PLmQVwv3 zl54iK=z5fm-*tfCrZbmf>ytzsEP19K9W`mam(+P;mDaJq)nG>)K=b5TBCTozL9LF~ z-mmPT`@0>-@5LfO4-hPg?#1~$sx564&BPm@hxTNzLkE^b9+sRYAQOy;1SoJ4)yjVpgI*j# zbCUi#PwiFUIkbI>6F5V)0=re9ZbpvxCv&WCZMaH|;{|PI@tvn5t9xhK#@;DuglVL% zn`UQR;NoCmrxX-h7q<}_Z!1m*nTbvrk$1)v*wg12Dolp9wFTz7h9&nw+v`-_qkV*)QkfV6LkM%^O;{Xg^IPfiX2729FT5TE`Q;m<0FJDJ@Onx!L0( zeZ;MHY5+D-Zmk?S`fE%#>{Yjz!!~EfT$KUPYF_&BZAQcelycELNq8dfYI~i4_g%ui zViq&S0Dx_tlfmg2gK4c)!J~umi358gh~Ve(cvIjO3h^;JEv8YDh{w=z>JP?-e~{g+ z$1%+weIW-TB@Nw6+t?AnyrPTSg!V`*>XEPfRg56fPyO!TR}@ejkL%M-AQt{$I^#ZK zO#N=7?t_qyx#iieGLXL3-M!*|IQ#+hs6mS#YPj`BZ)I9h-ulBxV&Kt$Q<5dLQxOhchGH)$rubKUTsU$}0b!3(uawqK@9 z)e_j6^bSV;YPjmkU1M6{TfYf!!-aymTnKk~TbOMi$*GRWcou*15nN`y+T^`LqoBa= zvuZtRxKI@c!2*Ss5kA#@1SLhs)i=Va;^n3Nfd`x64NX^`v*h;iIB1+{pfNcoaM{9C z(L$XW(B(xtD87C8(b&-+w*Dl{pZ!vt^GP^-qxH)#gPQh>4IWy zJ|rg*$0QLeJ4OIUWx$LRX9^Odj^S-~8wLVD;ont7n&J{00&XH}R(Fl*B(yH1IbPeu z;2^OMPt1Nl2h!r)-5W=k(7@qXmp43q z{23^hz+KXf?u9dLc^MoouO_{-o9^pULX|EezVxT|BQ`mhAKyDPI_KKGmosHFFA-cG z848Bp)vsR`3ZKDW&w5k%)F>D9iRrxWf;+<$Yy6UDfuukEJOEx^cuC!puxsZ?S^UZy zR-DzClQ<(zv~05~;LBq5Hu<5#2sAM=@;#DszNo#uxfryo#mT#Ld0pk;^%V30Ku<$- z(O2B#%}@BlBitZ70*I4zFTDt(B=f)cy3)gHcr*00ndsOYuRd(gJI}50G!fVU{UQB@ z{6-80A{ptBw>#uZ=OLh;1p&o75mas%r8N74*q=j4@TY}lC zdub&53R~bO2|N zO})sUo%iH$$fh^9BsLAW4q;a3?pEse1y8%H?ojCvdz{_G-&H+cPZJ4k-?i|cj`J?K ztec%pUj%TjzMQj|C}R%DjRPVe5E)w6QJy+}p;o!^IPnML_)AoJow*YY)E7W^t%TER zkn0LdUQ(Wwr-zgU5-eByR<%ZJIuBXkAdVQ`k3||x&a3wa|T~0|N#`U%h@9ur~2ED8;?-Yc8JNWtoAZ`At*}xZqCL8*; z6bQeOr}fbFp<6dlG|24vi{%OWp{)x7{jG4{ssUZyb#-P4ZM*rw-(1qjJI!15NP{AJ9?aaoXHNK?+gvoX&OcdEZ>3zWF)!U&ku8~3DLvg<{-0Dp|K7+``+ zIYx@I6OC8Ai_Ix*nghOT43RB{pw7H!PD785CUwm(`cr2#%XAfYtu8s1Cuq3L<+e0` z6+^=nj5=d?oLdKo0Z#jaRaD{ZI(6C_Sv+H^Sx3wd3fzuk)!QieNh@zWtbxAfYrxdY 
zY~lB~^gtd4_&-b2m7+XKwlW}ojkLRnK6stZT~gSJONe=~w%YVrfER-&%7seqgW($g z2aVhwGPG;Y+PKzXhm9$kTZ{o@u1s;7Rd5!UjwMNfewF@-ba#-ydG$O+%N+0*p z<&!!JtFC5f#`M|tfvlX>>Wu4KX$E?vvC5G&e#pQC>o8|U_3`fZIDr0(edHL?)HdK?gtyeu~Q+(ijv@N(@LZ((UIL7a7 z#mEr=#uF_%*5ahiv-mFN`uzj{oLgv+Ln{-MfWP&95pbY5wze{fbrzz+L18IiCl!23 zSbzlPc^#T+RBfvqYh>=S*WF!ew~B~CWt{V1nDf}Gym)A~C;Y%{|5yloPXc5yK+v+* z98ZYSVbc#);Mh@s|Q6JNq`b!w?8RJti1;jq`ED|l{mME6GcFX13^a^->C`T@}1=qIa-@!C6%aTs&1ykxY6*_e0(%A5G0fmeRrqwV75E z9%cO3aVi7K13A}5dU6A43IYTf=BI#in1KXQ3tA^J!BYgrc`-{u~?|;dFp8Shqxg|XE2K=(ne8cbB_t~+NJOaHo z0WmiA#{x?;_r4EWlwbB9hzk=Lj@K{18wdvcRb~H8ru>7^`L9?C zaN2PEcK(#Qg?dp{(GAa)cMLu(7KZBmw0yFh8@`dX`#RG;Q&mUz`6-3WJkMQI`CF31 zXKmB=7?Yn{je=I5X^x<&R7+!UeJ|;S&&K*lp>@AWCxmgN)X z=Ct(rmGAIR)GLeyP|F;Wmp5YDeU7@2@=Kby^H#D7^>8R7sxumzxt$Dk5@TA_ zo5mXxA4NeI4enwbW>)ETa;>;cDB*| zOomb9RgK)~HBm^2R)-zZr;QORT-zXnHl^*)Aoay1jEQ&d++kv3GQZlg^ks_I>CT?> z@qvutC`xyYTIOvERfJY#dmNmeI&*R;Eg3sP1{D|$%?pxl@a-Gd`uSKn-!PAKwEG2K zH{Rq1wHkiCh(8@{$<<=4VW>G>Sa>e{9F2$B(q@GJC1ciNvzG_vqLyii7QAp<7cZKi zaeg0;s@~E0BUA_rb6&vwW+$q`#4(IvIw zE_AQldIz0+I4SM20jZZ9ORx@EYZ!~0lvDGUVVB8EO60FZv%!Nq^t!vrXU+`V+B)4j z7=Pfdt?zX>Qdj3nfhjJ8z8#;45kFb`MO+_!m8qBAIj)9~ch_a{D|JUtyy6TA#zeTc zjV=YPp2R1dPo@}UQ{~K5y805m*{~DM9iGdPcV!W+qP~^>;J4$hMG!{JTalQL%Ij4k z8B?C6(g5$C)$;T&>B*zXHI0h}8gQ3OTT#lcAUHLh1pm$fd7Sq7!}(u>XntDchi*P? z5Q;6siTpGnft)X0Yv;4)G^^Czx-wjM{w2<<>>w`XqJ2R7v*)Kb|5?RnBNPFmmIBAr zRX#I=N&jp%qJ%)^a@NX1rGbvKmR%hrujd`nKbFfyT0f+K=4Ooyb#f(ZrG;UXp-Ix92d20726vf~WjCh-i__LZfx7|6 zBghxcbqI4tIJF?F9ur#&$JLBjO+<_E^@-kIK)zr8PsPsM@06) z5Gmd^TS56rzc};hKKR6<-xA5`;@3{s2eRfw5m6Oya}|Q3%EmI@1U$9rqw7^bRk*a& z#gtE5;xh1G|0+=!&B$1&RBkq$BmG&YhnmJ+Ds|^gozBmDg79I`0=Xre2PD)x=+#$b z^#sHXBY(<#X2-*VjNiX~&T#+zOOiikOPdyOdYbc?T-6g%#A(?{{!H*=2qB2tKDS(L zI@j|xmK{WV3syVvtC;9&4$G_N#S8i?)dMaM>8=Nmm|M&F7_S|Yrg<-{*x#xhO9Q}T zp`euqBK3sO{}OomJo*k;XhCuFj~%*$=RLVpiI`c_LS(n64eN9~FAr!5W_?c=JO;C5 zdR(j)%dZtT!aJ+et(SP`k`e@Kw%*h{K33U5UV(EDA4Q)WgAw-g$K~Fgc;XVPn4et+ zDY@+O8()Z@9DT~lR4SQ-RyYdmeiiMsOhujyEBvK5m99^9YXb2ATB-NPpUO)znxJg5 z{2@1H<7?P^5>C7nT+h9cpJwpfsy7zj58l3`+_hn%XO2FdFG3u3dW)xNN%+y0KeU#Y zQ7B9Pewvu7z`$X9A7k#`D=C#c zKfQd8J&om&Z5Gi%PPu;bFhQ)37RhP5ynpb^@VRJf`N4_VwEm%8rcU60S= zaRovwN=71>)yV0}MV48jzGV0?{!oI+)lJ{l>yv{*9(Iyw{!cuDMW)>v;YHhLh?b^H zaxc603g2F?>~#+Z!TBQ=ov2%w-c%YsLkC=c;=0L7oMK|d?g_Uvih_V$WSmO0IX9}d z@oGDDCJjxmMZt^0bhL^Pg!L=d(>14ah2MI%31K{w7E0;>zuHct9&g zvagfZHDNM;R(o{zEpt{khWdUi0dw_$@51p8r zSG^B>TBF%FnC}IU^Cs(7;UV*xppKai};p2*Qt8rk-s6(uI^0NxPXT~ zWAx`dEPbBAD}3UYPYe1Mo>1SH+s;wtuf%4HV5*MAFCL1D4}zpvj{XeeO{qAPZI%On zh0Vuxt+t;b_OZm#$TMqrnwv(wsj!Q88<3;pl5sgY$j0^cVhqiiHx{_b#Ql~T?^SG% zHWvff5gr}wOY7q>!t}OT=J~5C@kbS8cqFXP0sOahS7Tv9Vl*p3~U=sSwzp~ z!B4Bbhhg_rai)ov}pOP_Fb8@ea3l$xQW`0m(ko%^HAdx$qv>}#>624Cl@~! z7f-x2tI}jcsLD;8wP|-GUJgbHgnZ-Y*GfGKUtLaKmd_Q|@>J3JB&9*UIL6en6}(Jf zm-@5Mn=)dap3#EZ>g^iI+n_G06oC$zSVVon}DU`^hz6Sa=unP_az5kEs}$s z+OKnP=dz2%j7zy8?qSz3PUR7l2yEBhscYRsu1lC|tg7})n!1|o(}4ue!s{uC@q!!- zjYo`)3{6bbx(tP{8WOzh)5ioUwh+StZ@J#;CX+=x&oCV<;(6zIDo0b5)UIdS+`R7? 
zl$F&?^d&Ltp{ml>+AGG~=n5PP7AFJTh;5hDEHN)cSGfdztZ_?@eOBu8LPr6e-n!s+ z0~qXqUCrYvs-Qsw7ufm1MN4CSocHPQpJy6(D{s5>x28sOr@T1zSiE?1B--IZ^1xZ~ zH0#q#GK)0xLtcskH=d+7t+uTI{(|smglBY*1O;bAu*(;b>V7)DqNq@(H1aMgP)kY+ z>Zjp-ZjyC4++bRCph!K3;YY%X_-tT9ZEn zKhJRPa)Yn3eOevlG~&G%+xBgSCG*7oem()+t_{xmTwL|;0PK-ko$cr^!|8PGhoN8J zt^d_Vuk177<7dTx=52Q#B>}78?x%Y~;Nc4IlYoH=pdW2*_dH~jAchY8Bk>EG^Ijr) zE8%L=HBzY{DD^qVswRFJlIKKqoOql6M7^hCkl~Vgy=N`=2Z^EW_0_oFZ^m@n{3BG;2X_rJ@Mexur?PhJu5~GOHzy(t3QVkI=1x; z`r7L4$ny2m>@XqSlTAx0bZ#uL@&5u99Rjo;r_!orA-PSY!* zh7uo15#IP#8sd|1u}gDhwJv|_O7aNjENnT2v-q#Gbk@!YTarrGSt!9NK=7EB)lmKcyxd;JD+TBw5WaHH~aAjZEa65Zp#mke&fJV$;GAtUy18186?9g;FD zDxhU_lq>ctXL{PNNy7|}sun(aQkuRW{%nHvaFPYtUp!U_uPWcvnv=u>T^;Kp0DQ* zztqc(1vmk;<)qf}L+~EG0NY{9T@zXWMW6kpPWYRLcs(qBt7z!>hoXV)pIQt?!Vm7` zoA0gFOLD%OD)jH`h}D0KkIUOsOT=LJG&Q2q4w7QZzF&cHpj4|);#2W79roxW>gx&C z3Gg58C{~)fQ5xgp0cg!QP9Qo|XXJS0wDafwww!bq(3U-q@0KFL2^TN%lw)xT_qAeX zG@rL%(QxR$BXb87SczfpkKq@ zsT(>DlWaP%X*y6!ZAm_~a<@f63-R7BGyke zUF;)FIjB4)AyB9k0S^AHPR5kXo;mETDruE;(Wc;*xXdVv)nj(kZKUG|HY$0hMn;x=8IE)F=Z5_&rR2XAiyRQ2}#e;*SIkycV^ zM7kwK5NT;?0cip0E=2_q5b5rg=FlM`(%mgcr*y-A9lY`F`OQ2t&zXDY&IQgVcCEG7 zUhmhco6leCzZPCRslHbk+Y}R%L*qi#UGQue{fF(opbREfH?h^1)%`)cB$UAj&o`rc z!z(-OPHKk#E{E2H_09MZ;T?(o)r8YH5-N=)uGd+hlZP*F?wC<;ROxbN)^DG>U`3(i z!$fgp9W7%}{W7BfU%cTj{(xY{dKawNR=v@K^v@eCe9dh{WLT=XTc!h#+Lk`tFiOP$ z!z$NQL9drbOP=&fjEzowx_w2d#BZ9l{3A}Ij0{ba8<|yX1Lw|Ug;&6LQW}|RCFV)b z$IsTsS4F{Ff5foFptZ2(t8x}wX@xvoCqVs?T@|hGpvu_%xk^7Dt$E9g%+pcpUR1FD za{%qvVpcS+XVQNXkOUN6TTB6b1D)e|Njpoy*MDi0dPDJG&yXHM zZ@0<7o*@j&+nUc?-Wom1`#hBFT~7gzGKz>%kZf@yI@8SWez26SAg8V?Di#@|u5v-| zjkq_>hW>@|uxW1P+w|a$rQJ^W@bL;+{_-A6rzAIBC#xE(JS+g`Zo$Of3 zu&`^d7PbXwELtWD>2S}vvKn%$P@S$%h=>b;97O#Vhn}_5#4}>8ymXKVDDhFOdCF;M zT3JK(P)M`;rOhyjx{mCl$0CcaBXF-sGxcs;qWBARDUj?H*ne3i%FexTH1IoRiw)yq(UNV!h57!wVUa^k zAELNK(pg|flPoiR!iAi94_Pg+1B9(;(qyPzBdicsD~jN*G$cPC{#i}M{SeLeL4pFq zk-0|chjr}d2}(=-*GcS18W#_IT$K##-f&9X6<6(u53tb-%a7{$%l8m*IW8dMHWc{y*fIs@aXf&qqCR+ zwKp^euR4Iu6=?)7vu>W{pr)csc!ldjzq!wGK{@WM0j|V*Rq;l(A%r^GXt z$@-L~Y4~O?b>%4Xjxf05(IOY?rwQ}kcRjy#2loYo$1PdnC@U)l(MLk-R z-H9)1RAVUxHITdEy>pchu@={v`?Br2s)t9zq_=e(%a1v#DeGus(U_>g7|Z$%JFkW3 z;&fEvacA{$z`^N5C5D}5hC#-m3)_QX%GiLY zNLixt)Wa=BNC&+L0RVUXI4-FWi^PQ6>FX``;PBuY`dsqX1FPMv9i5KLLK&uq@7)Abx_=av2I(ih2ZF2paD zMtNbKZGIE-f^3p7lW%(Jl2>BtcI7X0m3!=3()kYGaFwiXE6tlm#YjkyUXBrvuTfX7 zpyIv}Q#EuV0?lU<>#a_&e)Wu?mFMN*UTKMx>s5OyMkB5tlrMC++-qw#xSxru+%Wm8 zw*mu?ed&w~bGxoM1ts{A`@Bc}N;3<21IGL}lpbOvX>u);Udr(_|0ZX5f_}Yv-$c3B zIKo6Zdp}1*ZEKR_LGxqXIFW+nm$#Z(cjPu$&8HMJpZG6^)W4&2xGdMd#*1C-cwR}C zDm25*uWj8KspT$KM?o)tpxJbF+L9eZK13`np)lfHdcXgXiCDyxAO)rI34Q=hZ%oVdE1Eb0Ufe`ZqTC**5r6XlofuOh%fo zdMs|XyWW=R&U0VLIyCX%D4aG){b45_qxtdLuGBcc;1+eYy7(tDq=5%+vSoVNYCzA% zkfQcXV>rCOt^dHX_R88&6Rw_Rcm#&#jIDo&Gr|}Yw6fmELqiR>l((+r zhz=o$|Fw3`n{YvT>iy1nt%rXn&OXq!c8Y>SHbITKZB~K+urX}h(N*#nA!{Qux+uW# z)y$7#P{|6Wq5_?U#&eu!5htDV`uH&NnRl~58`fcpTF)Y)q&c4<1drx6@y}vObr3}# zccT>1sQXBvA3Z_{J}9of@e!_eDirwRYL+EUXWc{2cvBgEFdK)-@W)LN)f~#U-aNUi%Ju+y zP6iY`h>{49*2UFzHoB4zSs5(pnK(YZYAS{1o4K0B8+|DBM9_!I@cOA6ZcTvdXsUOa z_Yj7+UM>=`Jt{WGgPuRnb)h7zBy1rZb?k@ArtaA6NJy79+0X8@zJ%C{8*XJNvAIs* zNva07(|8LF`F9%^m;D|w0`%~^-`{;1egSdPHNOh^$8pRhPL~iJ&MpVb#h|yZ(~$~e zdeEJh=3ZSbV8vB^yq^itY1%DmwrNWTK=AKYmbEn#cz0w;#;x_1_6 zfc`dps=`0q9yVtuzG}!5wHGITf7JcVLP%`v35eVsoty}7-n?}F6^QOZh*AamLlAxU zS{oW$fUv)>`t5b%jEctr#MxTan(`CuF=m|QJ?4&yOG`^2dsQ#97As|9Mtv-R>UZnb zEe;NjyXZ2NEUc``S*jw!XFYxZC#1ugi(Y19FRLyb1GMdNadB`55KPMjfgTb*!lxg} zSpLdJvU#9#ijukr3y+PBNfmV$7_@7aS*N?597`368YVeXMCj!0`f`pv;jQYfuQwRT z)dq3Ck&zKTgSyxI_i{3x2!gk)qzcl-giL~20A~C8JpAItivSR`w6tt$ZFK=Q!ONGq 
z;ze3dAc5hzLTJRfQ`%*mxUDKBM?@TlaW;& ztd#3Dha7Eo%V3c5^V+RMd3c;DC`?Lv4&q^rGHXzCV_iD$=jR7^D|0)k(vb&Et%CJZ ziNl6IAPzl+;(dIYL9;0&?CtFhvQ+qc7G~yf7QH5TNgTSnJD0fKj-8pxFM{0Lm}Tt+%v2K1Qb+tbd3(^wlfhBt&f1zbo_C#&NWme{g2KM$=jU%N_S-G? z`Ep+Y9X-fJpX=%M&i;He#1>k8FKUl#<4obZ$;G+MoT?#UFVzE20Cev_vbzmq1XQn0 z>{rQ-A!(yCny`Q8WDQT=~#`M&b^;}2%0s~M8fe0CXU(00+SAGC=*YQu%)R%wy3ltR= zDlpt)dG4k2bKH3YUEa{tv{k?Q6Xo#Wpq_yNYcGx)A;_-fJ}<&!;y{--O|NTKIw~H> z0@FdBA+*|lO=wN`b4y4|f9GZ?IU$`| zVPHoW!GE_@o!@RnooonJ8WWSrmiq8u%{$dW|6AKaR{!~Js>+c^T#@j7MZHmxi|o?BkyU8LY31_w$4=`IW}bi zXdos%FgA7^Y0{s;b2JlazCfa=c(AjotYBqnsq^Z&&J*W&=wH8d_0XJ2pix*^SxqO0 zgoLz&oFWi>y_GZ$NbMO1bSa2>aWEqWGL( zN*8r$!uaAp{8bA=fA~8VYEfh31)!MTRm|KB{QTk%7=z}<-l55fj%#oz;D8QgVL5?S ze`D=u{c6R=bxghI&s$>n%0p`3IT~JfVY8U|*@D(__x((*Nq@G@%E(|tL&Ny^NuUhB zY@s#F!d5Y0D|OY>hJnyP%I|D{e7I*iT(q^bL!B@(GU9f;_bxs0rqL#}m?c+~vF-uL z>xCXZ^!93=cKua@22>2dQ2YQPJ8plk@8R2tjXf>5BX-QX@86Ht>PdV1(wmjnU>X3V zNLpB5UmxD-7~WSyLnH9hr@N+C#@}UZ4tcUsQ;W*(0SF{LT~$^#U^2rHrl+Jf&>9oc z->N8gX6wQl^7JVH34(b-mxho&DxH7x8%kXBi;F|WlO-o&-Hsb^A|fMM`)%J}t%Bar z!chK+ms3WG@c{bqTiHUx`}gmQC(AK1GLjkhW+;k@i6!H;mO0~w zhJ@T*I0OSN84U~skzbNN`npvZi^B;HCL)b6LBgt4ZGBSm@|YLL+pL>3>hC%n?gE{J zdn2Nhgp3Tp3(j-GIM~>U>f`P0{(C0Cevretev`@FK^vxR1rSL@l8J5RIv#ZDC*EA$ zhF*d$y-y-bALI|j*mv``1eUEc!upCa! z&Zh24?*fYk1p1;|Cu-Xp8z91$We8_8!eXVE1FN{Vw+HwcF$oFs1X$6^N=p?9dwLIgZRGn#*9?TA%Jw0#14J;-rj-<0YtrQ&B`*{<)Ms#%yU&3 zgoz}HZ3e9obrnEze8Lt+2gW%)H}^^D!!^JjgyHMy>0x4Gwk*OI*zbObbWe=5| z<=U{AIcMk(YzvyIr#E4RG8-F3$t+{k5s?P0j2la0g_NYEvBRa0nVFfB zD;vO-4aR5gZ@GK;R;5569dmkmT0~S-`TYb^tM2;s>%1iX4o*k=HZvl&wzkgB&Qf41 zv~_!QNm>Dfw=!5T3t$rALY=zzu!g~kMo37Qnw%Uh;KmPl)$k%mAmB)bPQV1KE-4x4 z>$8PrsYNClFd9i>feKi5nwn#2vWa*=T8@fpu|}s#0OSvo8@=gJp|-em4Da5cK`hXR zj&^_gD{>~3Jkn0l{k6S4V_6ETIwuoT81x#zRJ;v=1P&M8SYOBTsq5_Q1j;i!dA2nn z4yvQ0_A=gq${4kt!g7@b|H6uEV`HONFc=>n-{SmGxCg~MlxrNf1e|z9W##I4H7OR> z*C23zG-PCQQuZau=Kx^gJ-5*=&<+IN@oG0CqxtFP(CDZr2#S;Ay4C{rNEaw|QEQ}!`>j9`DMW?X7zP_!6i^E^v3HNkS-#2`YGN(}os(d%S zJYW*Lx>V{jb)=zeXFD15%}WbeT_Yh)fV=&?OOW0?ZrQu%eK)=5G7A}*kb_ZgFBm&q zEv=@YR8HeBoFCSo-4@Zdwl1{nC!e181@9v2;vgw=geB``DQ4_Yb##)ELfXp;KwA>;ub;R=F+x?b~1AGcaJ^l_bV#HuB`~+rr}F z33L|tP2Jtyx-(VR(a{x2Y~pX+ym|A+jVCs{G3n_juIUwG}sGnn*{|}a3mxo7(z{6ychxcVQ;1~++V75GVN3K z^et9>nw7UGj^vOSWiSBTJv}`S4>#I@DGUxVU|_qtSZ?1IWS)8SW}N9aA4MTFM+Y23 zLQb;}MBH)|z(7fefA~(b+I6(EGp{QpV&x#0a%8f@x=Kwsr}D5%2xWspu9oMrCx?#H zGdvFAa8X!yEHX4EYCF_V{nHR_>=U1PQtVi4=qd~T$7-k9=!?$JqNGwCe5+{e&v`8? 
z3pGlHMacHZ2X&MkgI$oo=Mp9JpVW7Slb z;Kw~ASc2ZdDcgauS=4O3f&H7xR1{dBN`$1tZeKuHs~*8oC2Y3bugu$~F6;Lt?A76G zIj_~>NBAX(I62Nr_B3YgrCW5Zv&5hB{OW&|;$=sB(cCqZ*j)+5UIB5-hZh>Wk6Nr& z*p&h&7Oq6?5xiq1MZ}$nP=W0-uHA7Cu+?=INBix*U&$7%24H=6ZoyzDXLzjIqntMa zSA|oi>l`kJZfgyf2r^{7oKMVW?(g{UkDEx`FR(o_hNP;p=eEz3uva_fPl8gf9y$AZ z^&l5lt~*{WyOnA5vk3?qoOWy1cbx@%w(`5*^p{ZPU-7mi9==|=)oKdx&ib@OpK{UI zA-`BP(_He3AC?J@@2VBnJU$xsI>?QO%LosDSDm-$J>L3&dVFn2!di&0C=q-DnO z(XCTASPrfePUr>WwXU(ti9IGz@E6_yKD>OYw4Nr$EtVuKi&YiPkN0q1>~Jq z+jIA4%jp~j?T%F~W^-^ahS&n0+7O$myJb+K)Gw(px8i9vVB1$lv`T52HFd}P6$&$6 zv68CJgky9he47c%%y0`Zs*g_*#<=QrWufSS6`7SdH7%u*W>@}Sv^lwx<0C~vbVKfQ zK7HoSP75MkwLsZ~v~ld5i$E@4L_T|Msa5!sw91)^%cs?Xl&jY1t0=)0CHj{$#geei zLJ*`(6t{&kYPlY4o8rDb+j`DNZ)OSpnHIY$VaeK_eG?_-XQ-s6(MK!-sN*cTRaI4d zyf3zlVcOuc-ZKWSS9>ShosmN?`9Se3X!UhAAJ5kO{heH7?Pmj<&R@ddLTmyr7F?N} zbRnoF0K0vls!a?GJk5LhLRYuD*nAw;3)tAxEqtr1qgN|h0t)9nJd-c(XTWe}+RB3~ z5XoVB<=V9m-rkr#bzp13jvvCtHY+QOkVt?n2E=5#JD3>$=64&LyEu{BSLla#dy}!% zHr|}JWscGg%Qq;y5fMlgW-^InT2vthJZK>jiF@fh_>i!E?Ez(vi%7cnetWFIYJbjn z?K=!yN?KZ4;5?!HY=8-~T1*O)p;&>i_ly;AYg6EYiV5JTKv9Idrgu3x?Z7j#TOCsj zqM)KO8!1`)QS%yDa5h#}CSUFd)&c|u=rFXQQ&ge_+`Ls)GdcGn5p9QfnGjn-Ji*oH z%S?PLY5r|?1EM7+Wbb`_k6;g(k+lIgXRSu!;o(6n3ggNZ0*)6qF@r-xfq)OdG#{@J zdbRI|eG%c|rehU*z}&L6v5^uN&nQfX?KD$0vZ|^ILITdiRPqtd>R=CNTY z9m_uoux>AiyC|xvIy_N?t@5*Hw>KQY4Tb$un-?bebyEHpK&*YLs-%R6gY&ItFju>_ ze6@2~WpiKsoMXTLmO0g} zdVAwlhkyXRYs}bmt2vN)d{aPm&$XdO6$MS^t)DUQe^h!6PR zWG4IV83%{(yB?NQ98N!103*|KHU7!m;$pKTIyN?TU{sVk(l0nTxV*f4-Bfg1YHxQ} z!I$Fh-KN^wL~YSCThk*ZFX&yuLA<@ly*`@qA$5MD#XKAq7D39et4kEr3K|fD zz{kbif(fRdcJFoj?CLqc&H;_SeU2WlTL1es3xZ4h`fFbIu^P&JarAmJ^6xdxLo z{7l7_Fsa;t6kCOZ3w{3PC*V6AFHT}s5!)EwCyCl?RmJD2WUzHK9u=v{dZ%5kv=3o} zt;7&65mxs$)biBx1ps)b4o4w$-+u?P$zT5e1Y~R2JwtF2W|N@ATbNFB)2-yC>e+)_ z5iLt!?m_N=<0$bao^^MZ1dV_|^{-#Qa&wuqw6vt^*WW|<#&o>OWw^)`JoH}cds5n> z_BGSrnl_l{-A#3~3MX#eQ3R*m6v7RE#HiVceTMR$-ZLIq^^$gFeZ41<;HP}*W;&|<b?;1XB=q-L0*ejq4e@VUN-c`EI>JS>+mkYR7(=(rF)nxKM;69XP) zluM?QOC~^^V|tcTfk(8)`26V?{};UVwFg0F!e=h1je!z@J*Q|&!o7$GBZA-u_kxB? 
zfilU(C;tP85b>TUju|E^;)}_h&KE&nTc|<94ub9%%bk@VV}HSLA~JadIxEQ%>$veQ z0nmc@E9K+RWiCIooiXn@pMY>OL1mXJ{wJ0pWVIUy`Fxw9ccR5}g(4O>R z1tgc}j4vsIfc-$!RY&{^3pP|UG0(MR$9o4LiXnxXvuW4BJ3*w8Q$(PLIxkW{H51;{ z1+vgFY3?hd!d4Af-gga%}emPdxzqL<>XqwIQ|-$z3Z^|5YIb&rm3*;2hCFBsZ; z;wwvNa;8M6FdhE0QaXZH6VVHvT?f%$06KR(GzPBOgAp=9TCInv(jg^fQA`b8(_=~Yl1<5$OWSRPS=hbs2- zq9GA1{X^t`Vm3tK=pp*$V#Nd?*A%@ZHo7M>89s?omNs?*)xIiRDp>^n3;NV5(!-e^ zD{E~f%kM{-Qdsu@$AfY*)h*}!e%`=LwL{9|85~R{NJ5T=r*2siYwQSI1%4rP$Bdo#~U&d}pqm8|= zg2F`u1w7#*ps%&t$>B#j{~9+(4)cso41Chu!8nnj3TBc3Rkqc%+li=fjfufZ#Pb4^ zPpw;8cq~4E(aSN5a{JNBaiCwSxoaj4{U>GBfCxqnJ}8p~wX<~FR{-o2`~V3Ei?KbE z3o?dte8=ZfDA*4xfZ|>1ryv&t5_8O3#kS`8?q?Lv8t@Ae=W&hj`xIN+zoH|Y5pQnx zO8bxSJujE>%Tk~3OC6VV2TcR$Tz(}=KLkoKMZBDM2i+zyHN!^qT5v#Cb7i&LrNQ-e znHHBXg&=;!3IICZMK_!0zm!Em7kMXP{nM)gx<}OG_veS~K7o)NaPC0g^S-LZc0`Q; zB^-MF#3K6Bs>QE%@;RpJH{2|8vXsp^-tF)*v+QltGH4B$&y%)9^$O$Q7EUk7SgF}8 zna@l$;=TuVgPd9H-4^kpMWSav2PFzb&pgqBvA)pur7IiC8HDy%+kl^~F?P7!^tSW` zL(a}8EtROy>gvwaBE9;NZKy_Nuwd{WIc0fk<7nc`{=trNZ*k<@pask9d`CB6jz) zSgntk^KFPrS9E}YpXzzM@nf#t(&&wc$fE7*@l7J+lkvl9YW{<^rva7+3Q*4gsPU5t zyfHBz-WqED6B|R##gkW5hjz#V=E!^{0>I zeVKzp74?D9>i)Zo?5DE(Q1PZr4bF+KGxBKur;}MX2q|hOVW%}6IWH{SYa@iI!h@Sc z%_Q^ci1TfNj^q|1;`AU`(Dy%s0uMR;KmRR%*CQ73Yak1k%&?~m4ib$WNl_0-r6wb@ zkj(~0Sg1A>7>M;Gk`DqVKUJtw+DaH=;mRpLs)zE9{dgduD4!_fi{3m^%&o9;25SDv zea2N^b^Ol;FCGY!m{)AD9WEW!5?W%dAQuKg0cidFhrV& ztE$kPUpxH>51oGN)MfO8dxKOAEtOS9nBCGqrMc*0(P7ZEJ{i>mz>jghUq`?XgOOI# zv&v-=4!d#(TIOQc6N>FaEf@f{(Hqh03{aisc-55b)A;ev?o7hj`k1{yFF;a0tmdES zuz4!<>zK#QMKL0j@A))li|Kg+k5i2klF4kD!*nN5{0*KNKkK%DBE!V5^O`?Hn5?pt zzs{^Xr`+2K&sQ&f*;gYuT?|rbo-HB`BauIq*0*2^@ztU|Idl3>|I11E(+z>sZutBY zRSW+A{|7YwtIT;s#L#+GO2qXCTOT%G)*x~KYUOI@#0O=F9G$OApzvRUzN+d8^V)U& zaK=vX%%lOy-eV1_Tj1eQUPC?izaaj78)+&d)NvFTn)@Mp$hdP76$NrHy4CeatNV^& z%j}(TN?c*r{*8#h08?^K#kCOiW1+UAz&e)hwFj2GMSi4l45wKV1RPnq&BPhoKQ*M~ z&Y+tnjt`dQkqX_;$@QGl9roGGvM9$YywD~B58xXR80GcUlj?qlRBMTdmF=^Tl!qOd zvD>Vs2Hq;M5x&F|7ecWmpuQ!;+)5l&v&=&QX*=``Q8=-Vn@}NB_#PhQDR7=j22V>E zq-y}EW;Nn@pz~r9fNBlpxHjVJK(&I(3|YJ*fMR61Jov6Fg|<#gf9P;;0 zEZ+zhkgSJ|7i|h}EhpH_dUWql@mM=ObP_s?iZmh6Ukl5dG)zOt3*_??m!IA{EEn{F zD6J~TwTov*`#z*{W++V}ANIRBUEL9SLmTdX1n->ROcqcr3c4xatpn_xJLo3PJWzlc~I! 
zIxNP9YKz4S1MMh;lnf&yCDnAB(ah|&UYeV${zy|+^HR>qZp}Y?s%Bh zqMkN_7}d}zk=`j^Itq6ZTEFRF$^tugT~4>wF2{14v>A6!fVZmj_K`(#akV+m^Xu}1 zUt6i4FQS3ZlLHs^ruiE{G@v>=uXQ7!p{eFu6)`4M>m0D(R=Agy&W|Y)I}}1}HEgn^ z6Pq4O8`3nLuPlTce$lxo_PEkWZn@o5>Rq?{N5SF=IN`=X#3@=Ahz6OQ&?fba*JYjb zfa3wogws3mk#BIEVLizlG0*pK$OMZRR_uK4=g3qV{BwULGh|;Tn{{lkoA&{Irq5?00xPa9 z^vl0bU!Zt`2&Z|p1YMJs108Kny1pXN80$@|0`V;Fur54;`dYwjX0zHR*x$0q7Sy8L zqMP`odGc{>pG2Dc;j5XesHa3AvJxV_^`X3KZ?UA6SHn==ZN3K#Sx?xO8>aIQ3%MXJ zc#q>9?ZhG>_kq9e_u@wvOfQcg*-Rp9flAb8ZIGC<58O3?e#(hCXt>G~+QQ?dGi$qV z|IzD!MnttiDyk?PPHyStnnhFq2?bDs&`5kOB^o>_h@f^ol<*H>~_L7Ymrpy zV`pY5d058>doukWuhdmBx%!8pcxJdyamTleDG~jV(r75$qgy;7{Cs7yb!I(5`bB_Z z(8i|lMWt;#24a)+)&E8Mjm!A^Y2WFK&Z3O;=sy~5 zu(y|zQ>6g5iP?B!ZCKq~jy7S6Q+#jtQSAu|^aZhW@MJ<(O*4}DBbx~~>uToWvY;UE z_-J)REk3~@S2=oE%GbZ2)ZEY36JK4rrwlLW;_n50ZE^8TAw^lZVg6qnlcD6Z54+9G z-Nu)fS2I<<>)6C>JJ74yE^rT_svdnBT7-x@6zu{}KfSC09OAm1lvlvF1KtT8ZlCMv z;J(>*s44`eDbQ+wFvviWqQUR|gKkk-%CS`3_tH>V zvDXhtNvXZg;{@Y`9{^0-D_*+?pGqJTI`*BW*$`3m&< zq8v*pf7a^bU3J*}s(RM`v`yX)c5#p8( zo9U)`2DUf?GOW3|i%k1`J09J*R{_BNpE>jU#XbfndOQU>W|VJ;80i{d54A;59_H{_ zf$uCrk=1IOEaBCrr}#AV*3bdEcXe*+k(a~UK50eeO@d;I>9+8XSrQ2{W=ON~?vdm~ zq$sE4%M=S#9|AsM$-jD{YLjkZvLn05h_Y*V z7R=jA*HbzGKo_%dV$gHNuATiNYJM0-!&m`=IBvma@t=@*mt@|Y3L0|=DiKwYV3ux% zzuc_MW==hMhSdu3mV8|^Cva<6w^&77N)LlYdoEp*a$+26xpsg4-+6=f^bSlqyf2&DkVhwOOnA*pz z@?hThsCZ%eGJ$sk@&so(z7^`+hJ@lg=daM*0gR|71s?@jqLNuJ!!p|*_VrMFsEIwq zNp>TAz~=gI9K0yLKBd7(b;iD6yxA!_rjNEsoW*!LCF;VuC)qjHVeww!zUxiq7syyG zv>7(aZj*PGAbH+Vr|B`iRr1TJK-y9yeEmFChSp(!^O}O#QfakUL$;M5QUbk24N1Ij z>o}!1NQ0!dM9Vqj*#8@&4U^ zf`7I5b6y!gkV;+$3SO|IEs#!W<4h79};Q}?|aU)TJ@BJQ&yU1WFh@k1G}7K&|gTNoMEm%)h7u`E%TxaIGy}@ z8|CQ)oUrL%o9i+B5DB`nd+46v*YRy3wteg+DOYrzKDGLgR#0@8%gX)i7!UEY?yA5; z=U9BE|Juaz!dLHMGCEwhJy}yoqtN zFFcT)13>)dX_m|)jklk9fw1<*X8i1bHMHvW1`Da&YnTwak&Os71u$7y%VWwjJ(w5I zd6@l_j^dK-W_Dj;WoPfTz7!1UhLclhHj5*r1>h`v-)uEKaQ@X~^P|BAA2&w>U}F?- z{x@t4BGND6CeA=sXwN_3)52yhS`UC5~#!49lvpr?VbK=Wga*RBjgQlhfqB>3Sj z*uS&r7j)v=^Eze??_cS-iOPszW@`Et*mcfiX~YdltrPYVIT3dTeing2rVE~$``}$Z1C^6zwugya6}LtIBo2A8A6Tj zpC3IXUllft@AwFPf(>Pz$!Dk9SM9P=gcMLG&+JsOpqeC1)=byEZA%q?xMd?qc%Gi{ zahQ=-;s7f99f9~E04Wc!r@6{PVsih+?-+rgm14-rJ?q)fa18UHX@P=8BBp11B#Ne` zjjCU*otU1~P9=XAxtPsqH*W|?=lQ2`d8cnv)GXsGQReV<(CtNX3;AYyd+giTMYcWN zfaKR5AXqnsN^_@nWVpBITd?cN`^wd4P-dRRJkUJ)g)O}{eXa=W+EDyrbp_|I?#gz6 z<7w@cZ;FIKHIa9ntECuG(!zEapB-LKQIlQKt{1T{os?+x!_26TK|uSU9RnJo`meOVv9$3Z>Qv;Di0 zwjK*=s{}Dx{OKel;bBUi%P^F!Y|AF9pSTQR9|E4or`@QUJw(M2QzZ+)P+^?kTz%o^ z6Jy-e{I5E&`Xs&XEg!h<@@x#Xq}Rsp@q=;7o&Uj@Z^@KrE-DN)ggIjP=&~r8*p@2| zS@WI>_gK}ut>_u}s|59bua44sI>uD9;O=;lx~rU15GaP-dNDv`y>|EC%G9=Tl!xTv z@%X3w1Gn@|xOa|5GZ-7W{m(%5uSB|_O2a_m8%7xGL>}WC?$uXShSpLGd2O9UTTFc{a>)vlNAvaF{R})y43hnK&uk_ zgwR866veH>MN#c*lf)GkQcdavW3{hOqo|C%j67a@Xl8VIHM6bsNvp~pH>rs^NU=YV zI0f*GDmzJp!4X`cr%?cKXyBM6!#v#WPxPo>CTy}M@oz0$!CE-=5$5=_OfQ?{fyY+U zjg1*lLQ}ku@Smuiupj%2_8F`zZ5k*+7eS_zcIz~h)R(DC(AN->>sG**LfIlbp61ah z^fT-~1RNO)pdRQ}yMcKLMpTwmFZ73h<9$KKXhW-KU%GdbCT+bgzcE|e}Y>Z9~P2@K_lPyjoPNvEd*vbIq zG?jRImFOC(s1EBFEtM~`2f^d_w}_VEl&9BbkjSjwTY^VgU3EDGQj89qb2UnnF2^Wg zAWus}8J4R?lq@_&r`JaOj#wIA%;2zqsq_b~5lOEN5$Uqi_kd*%?~h2==t|6GgJhyet$+adbOFF1JW}_}7}5En zuF9?)Oh|$C(z@Ubmf-a1$isv6_vxG!d*0sR zNUa!T_aD4zNf>S@L=3Qd3{9Q_I7oohr}(N$)WT{X^HKbCLqIF!d*hB7pENH+Swp|d<5ZgeeBMw0T&lpe#88eWn>ObPub zV0agrA$hRyX8=Qv%5XCpW98Gm3Ol!GD*A5LA`l3)iCrD8WH}nsBP&%L&W3Af92xP` z+SJ@@Y`r#&WH~a+m83!%57(LZiL<*hxlhbHx&KU|t1cB1;VV1Nfm;|5j^0BsIVD*OdR_bV(|qx(|KcFSRrM8rUthGHob zCT*96se~0YquliC(z-^LiXP5}0fW{LwYzxA5el4fW;Gf{TCQ>(YguX4@)~o3;bIp& zr`e?@3!)goXND;&{%InoHds}L!5_Hc+bw>Rvql&3Y>6LL&+@;#k2%VhJ5v)7>qTR~ 
zESZu3yZehF+lp6@@J%5)IR~c;TUte|bTdj2Ud(?*L=M@Hc|Orc^|agFn^Nj!{i3XY z2ig7?<$Fe}pvWfE-rm{j!TwLwUf;HXDCOHf$Lr?c^tX1-$*t^Bez`3j%Gh3e(5BUGCj8`%zsMwaN?sD-!e~Cyy_UK>;+D^r^Mp*CD3mH;ZlEmdk>ZJdYC%o{v#IOG)^K9G_}s%>!X-f~o#zk-6HfR5|b1bUMi)9>n8rC*Q81=yF;;;DYy{b2lQUv;~H+J15} z*aZ4j^|Q}xl5!9q!Z9_~_Oe$)PfhLjg46_8TanX1<3vJdka=f7EOl9#l62NbRQ%o41owbiljy2+!)m`vjL& z&i;b^K8Vg;{M73+EVq3)uAeVS2FY;D7Qdhp%!`2(^#d8%HhM6O^o+C(&i2v9hLdvA zdOZ(zE}k27+#xRBlU}V;dvSlW$rZ;@ua+m3pIG${gLX(6TFb{EJlLO8C%{|*qnAFL zc0c=SD<*1ob=Z=bpiLK_4G1q`D%Or<7F@;{-p3WTVJbaM)!?TU7_$~ebQk?LM3BAJU*UGKk?j%~X>Y*DXxL#0H? z$nvvodFsk0aXGqjkHww@Vgzm!c7Ztx;R>-lzz5hWo%$aU(do^~3*N@F^4Tirr z@QR;L|KOWQ48?s^xdTSnM1avA4_|*|Xjw(w-uRn?iz>lAQEYV)US#d`uk)}%^|+wG$xzwoZYYFNP&E(W1S7`?Hz3(%+}~BXB3S$zL?36e ziE%0SXNp|-2eK>PPsTrpy9*WkpyXuOmyVW`N-5#>VK-+-s5vvh>ei>w@GE>jz2Ia3 zGL5_jcB0i8cC-q`nDNQGzfJ%9;c1d3ig+D=!Uc)Rg>;$z3S>&Ig#U{*}5wG{`H4#DK=>_3OH0 z@;fVSZhW1<(+~IIY@Al<t^*ZTGoz#xO>TX5I~h**F?=4*6Im=}x#05SxsrHwSDv`!sA)WDj< zVRM?=!LjStYX+Sp{5;MpdgJ9zJ7(0G z(h+{sTbq50-pP~s`7CEr^V%ISmNcJ2MOO)l?&7{0r6g&d`y%?HW93dciHf2?JKBnS zJxhk#7aM{I8(dZSi2h#@Gr07EF24_BP2XJ>jig2W3PJGt5DuJ9S8UqHqt;maH`(M* z8{?0I^53uDN~enc6|GnD@3UNZ()-B{>@M-5w7*sKh`8mC0i3_T%a-DRtUlX=|NF9r z`(0uU4S(G44>q-6DqBtpV_|h4CV!g#>5PR2M3viudTp(l^O=8tA1ukge>rYmf|@;t z1T*e;HV5lR?AL9laf167q_pl|7IR=dbv2;w=|BDgF?0^qh3@*dsk3=GC*xz$Xu^ssZbWVuZ@zU=JTI&8e!{MMAa?NScu35)O{qr)i+L>N=1Mp!p-_}=4ju9 zYjz@Pweek5tkq1^EtMNEJl}VDjp>>SCle4WyAjg9f)_zNf6#FrL31Uv3dcLM+85^m z%(KV!Pi!i?h| zcNy^nhrxvwE50It495L4qUSm^Byh<%8uk6N1lLhFgv(U99TpoW-TR<{K+p^>3RE8J zO@=hT&2BRyP7G}c-Hr2RIbabK4zKX4r$X=4rPJE@1$XY-_tc$&>OmEF5l6w zZ@2B>8aXi!zsxG?`?GfS$U`MUZ79Rk_o=(@&?DV!W}P#4=PzO++=-}O#NMzX`#zxf zh2oy7O<#HgpJkzWw&~kElQXUlp}B71hT@vg6+XImyPB;!AwMCzdTXeypF?Y?qNIKD z4F5S2{Z++;yTuec+C{%UHqkX-6~Fr7si2rh<8uD++rj?j>g}|qzNc=LqSO2JL3H=% zfLq8adB!d%vz1+^D-9>OE4gEVLw(>S$H7vZ?Q3TZmuZH$!i2)K&yV{wri?cFsB&z2 zxcq(*%35W0p5T82O#$%sH-M{{bhqC^lrALc_yXjW>6FVbYRT=+#vS& zp5SqU#DZRSzijlQQBoTR?N2e+|KG5Re&qx zRS_g!cj)90<4nfYU16=&O*e~cDSWZ7BJ$0&OFMJ0_0ScBd)pVAbDSDOIGk(@Bh(e! 
z0g*9uE&LkVVHpUv^Am*2>PM}sR!qvMrdv5Ny6kU;w@r`hgx}|TZaUdP(WcC``y=hr zOOk=(#e?pv-L{1z_SDwb4Gal+--waZ^?M8FcK^3~lVM4ncSRVGSUakr1p28$0a)g!?_zz;GG)cJYGlAAUr6|DJH428>>|>zf*{9mOu0cY;h&e(~GI zxpH^02~^HHK^o{!mE)_n<~kokVfhVRmVwK-53mJwg~r*hVK04CZ#X|C2m|o1 z2fudyUmsj?&AaajLrU>KH794@nKXRVt2`6p@@M&{5&sF}AKvrp*8!m65D@X7BzLA~ zWDFG=b@ywzMT62MJX}RZWqy8s5Eu#p_m^75SFoap%}y|_3y?AnAQBVWJI&CU0RBS0El?gpC# z!W&LuVCSgV)bAnPva`cR2(dI;748{}eH4m2eKs9F6)hqXu=;~vSRG(KAjy?MRm%Er zcnhlo>!7E|#z}Ew;&;4xCALUoA0s+c&Ck!L_nZiH=qnwk1p`N9TsQSiT3SPtkkGMX zWdBXyab!+mtb&4qGqr3w7HQ?f++6nLWNCkF8)G!W_$?CsQ={ zA37vPyZpu^H!L(XJ!pCL`$0&wzh8Kr4aXl59~2Z+U0uEC+n%Yzz?<3I+m8oTR8(MI z1AAcc4KOHt^%w4I<#rCt;nkN3W>!`O7N$*fw6x`H`kGzaMF@dnyK(VkCr1!|i;xsx z4;Yd&(cm@wJUk#7Gc`BArpw2QB7S^LK{P5Wn?{7~4kRWYdDkdS31E3tZL=qAb#dy4+obwA=JFrzrle`|FtANF1 ze6|Au4@f|L#YN@imn%k70LMTBic-2Ck1N<9#nO>ZCY9cS%hH~uXZ`NxhgX*#J7Uza zyOAa)W%tG@o9oNksYN&8NdO{%HipOn%o_@YVqjn(=hA1isam7V^7MVq?5|{>i|VYc zO&03CR1gjXh1?ZO%l-%3wjUJ~Y^P8_!RZEy&hhQkkiAFBd>M|yAAxfLyLI8h1+X+x zLwYiZ>46-hyj}oWMOdMchlnpu9yIkeHDFol6U1DT`~zxmeBGR?N~hDUb30QMJ?ucW z3Ot%`y&Q%&ZEf+;4uafnibh0oxPAWw5kIoyD4(Dp4m5h;?Lcz5rWdXyB`po?k8I-R zurOdi1c8^)U4W`QnUa!HnV<@nD%&s%urEN#0eXrB$I!?~S1H<%l)D0d;P3BGnM#&L zM|0P-wVjcdN1hLb%LeG9<7dtQe?%{f%(aaX23Y~ZUUZ1!A1QGTmOu)D_(gIXu6HG` zj(|89Qj#OS5}XafIBZ|6-J{%GC$NWm_ZA_>vYfBekw&BWEspKy<;@`atqduzjiu!x z3sV3H1wlYcN-71n)C+R8@ys_75iM}}2JSDktgLuTO6J0``j>>W_MqoI=gR*iA|e9F z4pC7ExY-W5zqeOWMa93j*tV#s2(-9$b#=fXf_DXGT)1f{n)AC(dGL=m>|_V_H5*)n z$Kb{nyzffeI6FIM>VPn~uM|Gyrsy#O^6Om09D|YXGdc(04yRsi2<;$=oGB{{Wn&VMaWVa~4Vzu8GRR>pyb~)aksEk}H^zkb6AAn3 zLwQHFiYi|O{Ia#7{Xn#Zi=->jDAugI)>F{dZ!{S@;4>;{gLJFrQ6v=#_QsW>%?20? zycs9EzRc+K?OfaIo&7{lQ&`=gpJJ2i@5k0gssHeymWsHqmV~*y$M6tT{}a~36U0y> z1dia3m}dJ^v3j^^MUOtuF}8oP`!JtC(xdRE?nunn^Bz|de@0`T^Epku)DCk;%xrmC zFMiAkb)P*Ia`=_pucjik3s;p4(D|1Us88D+KjHYXrSqm+U&ZGKf^*ZP$NL%!Rzrvq zQ=JL!vrenr@{*jrh~$*sqr($|t)tVf#yNCy{|Dykr^fl>37ZSYCNaYk!hsH*jEY%n zPQ?<%V7LgGO;2&lx>f|f+g)73>dM&LYck;l&%-DKE@IM+Sj>1<()59|oD4zsq}+2d z`I{xt!+Q+`tS>>2`;zkJJ~7kEbAlL@kiAnpzi2lnry;8nnE+tZ887M9?owPUK8^ZZ zzJbR+i~e=xO;UGWGVOc8HMQ119{yfjgRda zeBvfl>Afzr>Rw;@Zg3>{b~H~9L~SD zVz$u{DgobA=03xl)Q=@h_N3a;84dwPfp#vJxSx%@-fVoaeA(KIy{;E3?D|FMI=gj4 zExHPpt+xl0(J%VSH4i?B*1{3&zoyj7{IT;SgJyv|(5^S#az6fPvHVg^for9XrRk zabj*QOMo4zeZh{I<`fEU!+@KQCvzxCB6s2qVrcR6@Z1;CaB<&6ytif?{+1j+bn()`@qlko87 z=9o==Ovjh{UXtO!>{KT z76Mj=LVzt)S}IG@0p43+aPSokjgu!%Tur(LK&{9~u04CIs6rff44(RExN7N696sxT z&xlAn)3|yNyrb^TY+_z;KlXXv07_R!p#A;*9UJ&0Q;Q5FigCt+dG z0i<%jDJ{JRH5f%p-^j?w%d3pLrmoHwiZ`H*G3Mp~vl2Rx`9D`)Ka{R_l;Y|KlZCFF zaE+|+^IeC3OlH_$8xt^tI;gA=B61a5mfqNTP$K>)1u~^`oV+<4^+_$^(%m}WN z0bEW%4VzgMKYyOZn;IG#f`z&Em1bjHATnE8zLd2CC3a!ed?O?bdFP&2Z4Qfac)J#X zaR#8(AXxw&2*5RtPEL}63f@0&y1N&@WDix{C9r_NN8skWc-Q&#uCLrT*I$YlZpLV4sRh0=X2+#-xlzmGF%t>S=_wQGQ)geuRzTOjnQs`+qb8C45 zhXt6B+Fw~IzKu;y+PR%3bR!0tT4bXs{1Lbj)vF^(plHvYU0YrK>{q&iYJCxa(&>q= z@~oSFxhFQE;_^b-2LySRy37nm#ZNUiH`{$p5j=LxA20@EV-6%dyaNeo=~%0fpdc+o zUVi@LM~|So5DmNvWu1;rzG8h#OFoL|MG-e^(b)l?X3IMvF8*Yf>uHzDz1csAO-g(2 zXVkt&?lIHzgf8*g75X_HmfyQT@`KLxI{=}el%!Vqcx<0)>+Hmj)Xck+MUSbmQqVCR zf2JLDQqT@S83Gs zw8W=&$JQ2Aes>6y)CVmw^jkTQweh~L_}V+H5F~`-7tH{3f&{uX6e6@3NA2t5IwI!` zf_$~VK0iMNI(#2*Zz+p1DM?9fb12a6Iyp^(8XdqARvdHnM_hUM_~g)(J75Hzt~}3n z0&L+nP}tOu68@EZyY33J?2>7cT+_ z><(=|nBjaI_K4C+2C8;txtb}+&yQTF<%6~WDz#@ZF)V=C_^oqKGBQ{^(4=!tqJb|| z1GZ9>{V=r3*yLnoH06YF;N!=afuacQN&zAJZ#7S#u4W(CG1I%8xr2Nr#WTlMRp!Q! 
zXCI4R_a15?4%{(7K&ZI>4;(??&;W)3Ah|EHX#hIisBaSh^$s350ITQ*x%t+HPrk|I z%*+6^ac|$gh4LN^Wy@FSzA~>T9LFWunwqjIT2bAXZ{>#oX*xVS94~FbcjQP?OpKAG z<OstEcUrel(70~&`9KA!62o@J+wd-p#Y_i<9aptreZ-lQ?R zgsM_q?$=_-+~}{ZCdx`7rfa$g>@nyt3orjN9l{3yZ|oQs?~dFuEg-c779zk+Zm6uu zT-Ve{@apbA6J*w1sbfCXZc~m~ZQ*S2U;7!9%D+K>zpC0>;j>FvH-H~y{*pe-(6jIq ze0#*iSl{YttYcs$ZzwElwxGb#%lyq zljB+Hv*UpdZL0|rwMHX+T*p*`rA&Lh4{@;Z=uQx)QxAJIkS)B3gB{qg^plGyv5Sry zqQW0VzJ}*kG!oZu;PEtkp!A!ZYaBy^16MfQe3;8S3tH2`gD(!j+(o>i`-{0lUb1;< zVMSxJ+Vm9TEu^azlVI)|=IT{WN$TUKH(v_2$@y&(r(&riDP;TU3X6L<=nVH(>^z?Qt>uBQF{-HZ!`kFxHt z^z@-)Z3&#_JC}*n9sa_BPDyOS>T~(`dUwl8v&=>T%;p+31)<=*@{+n$_hWm2Z=Uoz z<+F(Omm3yT_KhE&=+6urrA7?DiIAbbiSM~vVbCO%B!Ad#A~yrG)F3D7LeO_<$uBO< zH{&87AcVI`SKH8CmK>0Hd@^PaH0hBQ8ODX#*3%n5gK%vJ5gVm_VAKf}=aqiknHXx} zi|f(@y~MboO%S)1oA$DdZ}#f2GRhqL?p+x%`MD3{=g6bx4!-2Ia_pF0f7*>b|Js4f z`VY1CgBz*QguL>^cRv=F!*V4WU6Q=lR2FWBjEeS_Z_Op=UJ6KdgC_|8xM?R>9`ZQ8IjKooh;t6uEa=)QgW}?<7KqyLn)Y$hIdc zpwZQ$1HY}ZlrB{^Z~{BV@UW{E5k=HwzGaA)Dm>fqON z?+8EN$$Ra8x(x!Iz;TF!I|hHj|Pj>98E8Za_8j){9~WH;g%aZST9=WNP(cD4=;V`Ghku6|0RW&Rm2aR*t0rF<%LPSy7c z?peaofFFaw(Hqu+7w&I_RCS3?`OprzT>A34XSF-hnV9U?-dLm+_9{z>E2`MtSt36_ zPfWY--3eRz+8gV_+-50f%fyP+;CspM2IJ3p=mrq3auhkuIH1$9%hiFT%I}!rj5EBp z8K0BltyO;F{Cz_!f-*>clV!@9D)Uoel?Hw!hVe*mEs_fZA!+Vo`95cqGPXsKHXOm5 zuU0ebqp;5;+5mFedyX-pC>5*IJH%_4Sc)S#y9j|#4qQa8R-AM2z^ z*d^S@5J1~u-2U{sG|1VWR5mLW_|D7Gya@)5wm!X)`+{vx3SqWZ3B%v3sR=f9a6TX}^|{&&FpqYs^HU1wyU)uWvXY47X27Ec@~J@Z+YF7? zv#$^GkC%0!JCxKol;javabz_PdxYvT*Xv3=_6QWd0UYxNrygG>TA`lQXZE z(Wkhau$6C=!~HGt;1(uif$gL?t2GY~CObXCb;qnOhPUbn3#&t5``Dmvdt%0Q|6+4z zptK2B!Ss6gmd#H?5#248s!=t@WWipO-B>(=JUy`peg={fNMF8#><8#UH}rVp;*3HcECpjIgG6 z3hJV?A06){;@8@vTFiF|@BY&;h#UH7t`=%eGqZdv!yrMi%sYLoyELb9x3K!?nX^q8 z{(23@)>h}!?b=q_$~>o}tCP&@N3*gNe9Lk-MQxPi3qurNR>q~@WF29pWs_zr*Y4%@ zVqIUZRpb<_+1d^GG&d`AT8D;BxX|hyeZ(!DQZ1010YEL$VQ?Qfw zI2@)V8nu~=HfH0rD_!6Bw+^qK)TBhtHeWse^qKb{C52g0IybvMu#+=FhZNpzQ&3hl z`l9mKm&`p?+e@(e%TKlMXr6FMi2kyp*n;^<)d5PEet-6C$`+zBr5B96qgu&S=AOjZXYcIHq?wem0cUFn7E2t6DXA!|DnJ zso-ig)`%2`E$@9Ec;gv-$&Li%S`{gi%#~H}?;ZobHQmkG@D;N@<%NkO`6azWGAC)c zx24xyTOUG_|1B@agH?*dd*rc$w%xe#yW3B9N-xpl5dqa0lkdVy^d z)IoSIK_i^j^l(e30CQAiGc_vjSFK5_>O}8fTztW_9h1)61S|U;aKVJ}F@Id##;RHW ztK_fp1eMH$j{1Ymr<&7?@xgjL9;XD-j`|$>epgC*VyS4~-h&WjzH89)5shQSQ2Trn zv)tAA8rR=>P=@V!oigiV1OJOg;&)l_VH8e|TQ3SKc}dC1b@doud>ARDig_UjP9`h5 zi2C&LCXS#B$GE>va(Db3gx3)`2o)Ra+z7~k;iw%v1u&R(pZ&(%*4o{FIWKL-5?-H8 NOG8(^K<(zE{{Tl!-Ddy* literal 0 HcmV?d00001 diff --git a/docs/source/overview/architecture.rst b/docs/source/overview/architecture.rst deleted file mode 100644 index d76d7e7da..000000000 --- a/docs/source/overview/architecture.rst +++ /dev/null @@ -1,4 +0,0 @@ -Architecture -============ - -https://sourceforge.net/p/xcat/wiki/XCAT_Overview%2C_Architecture%2C_and_Planning/#xcat-architecture diff --git a/docs/source/overview/features.rst b/docs/source/overview/features.rst deleted file mode 100644 index 4640ce691..000000000 --- a/docs/source/overview/features.rst +++ /dev/null @@ -1,4 +0,0 @@ -Features -======== - -https://sourceforge.net/p/xcat/wiki/XCAT_Overview%2C_Architecture%2C_and_Planning/#overview-of-xcats-features diff --git a/docs/source/overview/index.rst b/docs/source/overview/index.rst index 4af7cdd62..401ee1dbb 100644 --- a/docs/source/overview/index.rst +++ b/docs/source/overview/index.rst @@ -1,14 +1,234 @@ Overview ======== -xCAT enables you to easily manage large number of servers for any type of techincal computing workload. +xCAT enables you to easily manage large number of servers for any type of technical computing workload. 
-xCAT is known for exceptional scaling, wide variety of supported hardware, operating systems, and virtalization platforms, and complete day0 setup capabilities. +xCAT is known for exceptional scaling, a wide variety of supported hardware, operating systems, and virtualization platforms, and complete day0 setup capabilities. -.. toctree:: - :maxdepth: 2 +xCAT Differentiators +-------------------- + +* xCAT Scales + + Beyond all IT budgets. 100,000s of nodes with distributed architecture. + +* Open Source + + Eclipse Public License. You can also buy support contracts. + +* Support Multiple OS + + RHEL, SLES, Ubuntu, Debian, CentOS, Fedora, Scientific Linux, Oracle Linux, Windows, ESXi, RHEV + +* Support Multiple Hardware + + IBM Power, IBM Power LE, x86_64 + +* Support Multiple Virtualization + + IBM zVM, IBM PowerKVM, KVM, ESXi, Xen + +* Support Multiple Installation Options + + Diskful (Install to Hard Disk), Diskless (Run in memory), Cloning + +* Built-in Automatic Discovery + + No need to power on one machine at a time to discover. Also, nodes that fail can be replaced and brought back into action by just powering the new one on. + +* REST API + + Provides a REST API for third-party software to integrate with. + +Features +-------- + +#. Discover the hardware servers + + * Manually define + * MTMS-based discovery + * Switch-based discovery + * Sequential-based discovery + +#. Execute remote system management against the discovered server + + * Remote power control + * Remote console support + * Remote inventory/vitals information query + * Remote event log query + +#. Provision Operating Systems on physical (Bare-metal) or virtual machines + + * RHEL + * SLES + * Ubuntu + * Debian + * Fedora + * CentOS + * Scientific Linux + * Oracle Linux + * PowerKVM + * ESXi + * RHEV + * Windows + * AIX + +#. Provision machines in + + * Diskful (Scripted install, Clone) + * Stateless + +#. Install and configure user applications + + * During OS install + * After the OS install + * HPC products - GPFS, Parallel Environment, LSF, compilers ... + * Big Data - Hadoop, Symphony + * Cloud - Openstack, Chef + +#. Parallel system management + + * Parallel shell (Run shell command against nodes in parallel) + * Parallel copy + * Parallel ping + +#. Integrate xCAT in Cloud + * Openstack + * SoftLayer + +Matrix of Supported OS and Hardware +----------------------------------- + ++-------+-------+-------+-----+-------+--------+--------+--------+ +| | Power | Power | zVM | Power | x86_64 | x86_64 | x86_64 | +| | | LE | | KVM | | KVM | ESXi | ++=======+=======+=======+=====+=======+========+========+========+ +|RHEL | yes | yes | yes | yes | yes | yes | yes | +| | | | | | | | | ++-------+-------+-------+-----+-------+--------+--------+--------+ +|SLES | yes | yes | yes | yes | yes | yes | yes | +| | | | | | | | | ++-------+-------+-------+-----+-------+--------+--------+--------+ +|Ubuntu | no | yes | no | yes | yes | yes | yes | +| | | | | | | | | ++-------+-------+-------+-----+-------+--------+--------+--------+ +|CentOS | no | no | no | no | yes | yes | yes | +| | | | | | | | | ++-------+-------+-------+-----+-------+--------+--------+--------+ +|AIX | yes | no | no | no | no | no | no | +| | | | | | | | | ++-------+-------+-------+-----+-------+--------+--------+--------+ +|Windows| no | no | no | no | yes | yes | yes | +| | | | | | | | | ++-------+-------+-------+-----+-------+--------+--------+--------+ + +xCAT Architecture +----------------- + +The following diagram shows the basic structure of xCAT: + +.. image:: Xcat-arch.png
+ +Mgmt Node (xCAT Management Node): + The server on which xCAT is installed; it performs the system management for the whole cluster. Generally, the database is installed on this server to store the definitions of the compute nodes, and the network services like dhcpd, tftpd, httpd are enabled on this server for OS deployment. + +Service Node: + A slave server of the **Mgmt Node** that takes over the system management work for part of the nodes in the cluster. A **Service Node** has all the functions of the **Mgmt Node**, but generally it only works under the **Mgmt Node**'s instruction. + + A **Service Node** is necessary only for a large cluster in which the **Mgmt Node** cannot handle all the nodes by itself because of the limitation of CPU, memory or network bandwidth of the **Mgmt Node**. + +Compute Node (Target Node): + The target or workload nodes in the cluster; these are the servers that xCAT manages for the customer. + +dhcpd, tftpd, httpd: + The network services that are used to perform the OS deployment. xCAT handles these network services automatically; the user does not need to configure them manually. + +SP (Service Processor): + A hardware module embedded in the server which is used to perform out-of-band hardware control, e.g. the IMM or FSP. + +Management network: + It's used by the **Mgmt Node** or **Service Node** to install and manage the OS of the nodes. The MN and in-band NIC of the nodes are connected to this network. If you have a large cluster with service nodes, sometimes this network is segregated into separate VLANs for each service node. See TODO [Setting_Up_a_Linux_Hierarchical_Cluster] for details. + +Service network: + It's used by the **Mgmt Node** or **Service Node** to control the nodes out of band via the SP. If the SPs are configured in shared mode (the NIC of the SP can be used to access both the SP and the server host), then this network can be combined with the management network. + +Application network: + It's used by the applications on the **Compute Node** to communicate with each other. Usually it's an IB network. + +Site (Public) network: + It's used by users to access the management node and sometimes by the compute nodes to provide services to the site. + +REST API: + The REST API interface of xCAT, which can be used by third-party applications to integrate with xCAT. + +Brief Steps to Set Up an xCAT Cluster +------------------------------------- + +If xCAT looks suitable for your requirements, the following steps are the recommended procedure to set up an xCAT cluster. + +#. Find a server as your xCAT management node + + The server can be a bare-metal server or a virtual machine. The major factor in selecting a server is the number of machines in your cluster: the bigger the cluster is, the better the performance of this server needs to be. + + ``NOTE``: The architecture of the xCAT management node is recommended to be the same as that of the target compute nodes in the cluster. + +#. Install xCAT on your selected server + + The server on which xCAT is installed will be the **xCAT Management Node**. + + Refer to the doc: :doc:`xCAT Install Guide <../guides/install-guides/index>` to learn how to install xCAT on a server. + +#. Start to use the xCAT management node + + Refer to the doc: :doc:`xCAT Admin Guide <../guides/admin-guides/index>`. + +#. Discover target nodes in the cluster + + You have to define the target nodes in the xCAT database before managing them.
+ + For a small cluster (fewer than 5 nodes), you can collect the information of the target nodes one by one and then define them manually through the ``mkdef`` command. + + For a bigger cluster, you can use the automatic method to discover the target nodes. The discovered nodes will be defined in the xCAT database. You can use ``lsdef`` to display them. + + Refer to the doc: :doc:`xCAT discovery Guide <../guides/admin-guides/manage_clusters/ppc64le/discovery/index>` to learn how to discover and define compute nodes. + +#. Try to perform hardware control against the target nodes + + Now that you have the node definitions, try to confirm that the hardware control for the defined nodes is working, e.g. ``rpower stat``. + + Refer to the doc: :doc:`Hardware Management <../guides/admin-guides/manage_clusters/ppc64le/management/index>` to learn how to perform remote hardware control. + +#. Deploy OS for the target nodes + + * Prepare the OS images + * Customize the OS images (Optional) + * Perform the OS deployment + + Refer to the doc: :doc:`Diskful Install <../guides/admin-guides/manage_clusters/ppc64le/diskful/index>`, :doc:`Diskless Install <../guides/admin-guides/manage_clusters/ppc64le/diskless/index>` to learn how to deploy OS for a target node. + +#. Update the OS after the deployment + + You may need to update the OS of certain target nodes after the OS deployment; try the ``updatenode`` command. The ``updatenode`` command can execute the following tasks for the target nodes: + + * Install additional software/applications on the target nodes + * Sync some files to the target nodes + * Run some postscripts on the target nodes + + Refer to the doc: :doc:`Updatenode <../guides/admin-guides/manage_clusters/ppc64le/updatenode>` to learn how to use the ``updatenode`` command. + +#. Run parallel commands + + When you manage a cluster which has hundreds or thousands of nodes, you often need to do something on a bunch of nodes in parallel. xCAT has some parallel commands that can help you with these tasks. + + * Parallel shell + * Parallel copy + * Parallel ping + + Refer to the doc: :doc:`Parallel Commands <../guides/admin-guides/manage_clusters/ppc64le/parallel_cmd>` to learn how to use the parallel commands. + +#. Contribute to xCAT (Optional) + + While using xCAT, if you find something (code, documentation ...) that can be improved and you want to contribute it to xCAT, please do so on behalf of yourself and other xCAT users. And welcome to the xCAT community! + + Refer to the doc: :doc:`Developer Guide <../developers/index>` to learn how to contribute to the xCAT community. - features.rst - support_list.rst - architecture.rst - setup_cluster_process.rst diff --git a/docs/source/overview/setup_cluster_process.rst b/docs/source/overview/setup_cluster_process.rst deleted file mode 100644 index 263ec35a6..000000000 --- a/docs/source/overview/setup_cluster_process.rst +++ /dev/null @@ -1,2 +0,0 @@ -Brief Steps to Set Up an xCAT Cluster -===================================== diff --git a/docs/source/overview/support_list.rst b/docs/source/overview/support_list.rst deleted file mode 100644 index 04d821fc1..000000000 --- a/docs/source/overview/support_list.rst +++ /dev/null @@ -1,2 +0,0 @@ -Supported Hardware and Operating Systems -========================================
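To make the "Brief Steps to Set Up an xCAT Cluster" walkthrough in PATCH 19 more concrete, here is a hedged sketch of the command flow on the management node once the target nodes are defined; the node range ``cn1-cn4`` and the ``<osimage_name>``/``<postscript_name>`` values are placeholders, and the osimage names available on your system can be listed first: ::

    # confirm that hardware control works for the defined nodes
    rpower cn1-cn4 stat
    # list the generated osimages and pick one for deployment
    lsdef -t osimage
    nodeset cn1-cn4 osimage=<osimage_name>
    rpower cn1-cn4 boot
    # after the OS deployment, sync files or run postscripts with updatenode
    updatenode cn1-cn4 -P <postscript_name>
    # run a command on all the nodes in parallel
    psh cn1-cn4 uptime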