Mirror of https://github.com/xcat2/xcat-core.git, synced 2025-11-03 21:02:34 +00:00
	Remove trailing spaces in file xCAT-server/share/xcat/rollupdate/rollupdate_stateful.input.sample
@@ -31,7 +31,7 @@ scheduser=loadl
-# Scheduler Feature values 
+# Scheduler Feature values
 #    Node feature values that will be changed in the scheduler during the
 #    update process.  These features can be used if users need to run jobs
 #    on only old nodes or only new nodes, but cannot have the job span both
@@ -41,7 +41,7 @@ scheduser=loadl
 oldfeature=oldvalue
 
 # newfeature:  A new feature value that will be set in the scheduler for each
-#    node after it has been updated.  
+#    node after it has been updated.
 newfeature=newvalue
 
 
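As an illustration only (the feature values below are hypothetical, not part of the sample), a site that tracks the OS level as a scheduler feature might use:

oldfeature=rhels6.2    # hypothetical: value carried by nodes not yet updated
newfeature=rhels6.3    # hypothetical: value set on each node after it has been updated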
@@ -49,7 +49,7 @@ newfeature=newvalue
 #
 # updategroup:  A set of nodes to be updated as a single group
 #        updategroup = name(noderange)
-#    where "name" is the name to be assigned to the updategroup and 
+#    where "name" is the name to be assigned to the updategroup and
 #    "noderange" is any valid xCAT noderange syntax (see noderange man page)
 #    You may list multiple updategroup stanzas in this file and all of them will
 #    be processed.
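For instance, a hypothetical entry using a simple node name range as the noderange would look like:

updategroup=rack1(node01-node42)    # hypothetical name and noderange, shown only to illustrate the name(noderange) form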
@@ -57,8 +57,8 @@ updategroup=ns01(c4lpar201-c4lpar204)
 updategroup=ns11(c4lpar211-c4lpar214)
 
 
-# mapgroups:  Many updategroups can also be defined through a 
-#    single statement using nodegroup mappings.    
+# mapgroups:  Many updategroups can also be defined through a
+#    single statement using nodegroup mappings.
 #        mapgroups=nodegroup_range
 #    Where nodegroup_range is processed in the same way xCAT handles node name
 #    ranges to generate a set of nodegroup names.  Each nodegroup will be
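A sketch of how such an entry expands, assuming nodegroups named block1 through block3 exist: an entry like

mapgroups=block[1-3]    # hypothetical nodegroup range

would presumably produce three updategroups, block1, block2 and block3, each containing the members of the xCAT nodegroup of the same name.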
@@ -71,19 +71,19 @@ updategroup=ns11(c4lpar211-c4lpar214)
 
 
 
-# mutex:  Identify updategroups that are mutually exclusive and must not be 
+# mutex:  Identify updategroups that are mutually exclusive and must not be
 #    updated at the same time in order to maintain active resources within
-#    the cluster. Only 1 updategroup listed in the entry will be updated at 
+#    the cluster. Only 1 updategroup listed in the entry will be updated at
 #    a time.
 #        mutex=updategroup,updategroup,...
-#    For example, the update jobs for ns1 and for ns2 will not be allowed 
+#    For example, the update jobs for ns1 and for ns2 will not be allowed
 #    to run at the same time:
 #mutex=c1,c2,c3
 #mutex_count=2
 
 # You may list multiple mutex stanzas in this file to identify different
-#    sets of mutual exclusion.  
-# Multiple mutually exclusive sets can be specified using updategroup name 
+#    sets of mutual exclusion.
+# Multiple mutually exclusive sets can be specified using updategroup name
 #    ranges. For example, the following:
 #mutex=block[1-3]a,block[1-3]b,block[1-3]c
 # would be equivalent to:
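The expansion the comment leads into falls outside this hunk; following the same name-range rule it would presumably read:

#mutex=block1a,block1b,block1c
#mutex=block2a,block2b,block2c
#mutex=block3a,block3b,block3c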
@@ -110,7 +110,7 @@ updategroup=ns11(c4lpar211-c4lpar214)
 
 
 
-# maxupdates:  Maximum number of updategroups that can be updated at one time 
+# maxupdates:  Maximum number of updategroups that can be updated at one time
 #    This allows you to ensure you will always have enough computing
 #    resources in your cluster and that not all nodes will attempt to
 #    be updated at once.
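An illustrative value (the sample itself sets maxupdates=all, visible in the next hunk header):

maxupdates=2    # hypothetical: allow at most two updategroups to be updated concurrently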
@@ -130,7 +130,7 @@ maxupdates=all
 #reconfiglist=login1,login2
 
 
-# jobtemplate:  Scheduler job template file.  See this sample LoadLeveler file 
+# jobtemplate:  Scheduler job template file.  See this sample LoadLeveler file
 #     for details on how the template will be processed:
 jobtemplate=/opt/xcat/share/xcat/rollupdate/ll.tmpl
 
@@ -143,7 +143,7 @@ jobdir=/u/loadl/rollupdate_jobs
 
 
 #reservationcallback:  INTERNAL KEYWORD used for development only.
-#    This is the reservation notify or callback command.  
+#    This is the reservation notify or callback command.
 #    For Loadleveler, this script must reside on the LoadLeveler central
 #    manager and will be called when the reservation for an updategroup
 #    becomes active.
@@ -152,19 +152,19 @@ jobdir=/u/loadl/rollupdate_jobs
 #
 
 
-#reservationduration:  Maximum time to hold a LoadLeveler reservation for 
-#    the update process.  This value in minutes should be longer than the 
-#    expected time to shutdown, update, and reboot all the nodes in an update 
-#    group.  xCAT will release the nodes from the reservation as they come 
+#reservationduration:  Maximum time to hold a LoadLeveler reservation for
+#    the update process.  This value in minutes should be longer than the
+#    expected time to shutdown, update, and reboot all the nodes in an update
+#    group.  xCAT will release the nodes from the reservation as they come
 #    back up, and will cancel the reservation when the last node has completed.
 reservationduration=15
 
 
 
-#update_if_down:  If set to "yes", also attempt the update for any node in an 
+#update_if_down:  If set to "yes", also attempt the update for any node in an
 #    updategroup that is down or not active/available in the scheduler
 #    (useful if you have nodes that are not part of your scheduler's cluster).
-# If set to "no", any node in an updategroup that is not active in the 
+# If set to "no", any node in an updategroup that is not active in the
 #    scheduler will be skipped.
 # If set to "cancel", if any node in an updategroup is not active,
 #    the entire updategroup will be skipped.
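A rough sizing sketch with hypothetical timings: if shutting down, updating and rebooting the largest updategroup is expected to take about 40 minutes, a value with some headroom would be:

reservationduration=60    # hypothetical: 40 minutes of expected work plus margin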
@@ -189,7 +189,7 @@ update_if_down=yes
 #    filesystem on all the nodes, or moving critical services
 #    to a backup server for specific nodes.
 # Multiple prescript entries or prescript/prescriptnodes pairs of entries may
-#    be specified. Each command will be run in order. 
+#    be specified. Each command will be run in order.
 #
 #prescript=/u/admin/bin/shutdownGPFS $NODELIST
 #
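An illustrative prescript/prescriptnodes pair (script path and noderange are hypothetical); the prescriptnodes line restricts the preceding command to the listed nodes:

#prescript=/u/admin/bin/movecritsvcs $NODELIST    # hypothetical script
#prescriptnodes=service01,service02               # hypothetical noderange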
@@ -204,22 +204,22 @@ prescriptnodes=sn
 
 
 
-# skipshutdown:  Should a shutdown command be sent to the nodes.  
-#    Shutdown is required for diskless nodes.  For diskfull nodes, simple 
-#    updates may be applied to the nodes through prescripts, and a node 
-#    reboot may not be required. 
+# skipshutdown:  Should a shutdown command be sent to the nodes.
+#    Shutdown is required for diskless nodes.  For diskfull nodes, simple
+#    updates may be applied to the nodes through prescripts, and a node
+#    reboot may not be required.
 # Default is "no" - a node shutdown will be performed.
 skipshutdown=no
 
 
 
-# bringupstatus:  
+# bringupstatus:
 #  OR
-# bringupappstatus:  
-#    The xCAT database node status or appstatus value that xCAT will check and 
+# bringupappstatus:
+#    The xCAT database node status or appstatus value that xCAT will check and
 #    will wait for to determine that the node has completed its updates.
 #    After running prescripts, xCAT will continue to check the status, and once
-#    this status is reached, xCAT will  release this node from the scheduler 
+#    this status is reached, xCAT will  release this node from the scheduler
 #    reservation.
 # If both attributes are set, only bringupappstatus will be used.
 #bringupappstatus="gpfs=ready"
@@ -229,7 +229,7 @@ bringupstatus=booted
 
 # bringuptimeout:  (optional) The maximum number of minutes xCAT should wait
 #    after completion of running prescripts for bringupstatus/bringupappstatus
-#    to be met before giving up.  
-#    The scheduler reservation will be cancelled if the timeout is reached. 
+#    to be met before giving up.
+#    The scheduler reservation will be cancelled if the timeout is reached.
 bringuptimeout=10
 