package xCAT_plugin::esx;
use strict;
use warnings;
use xCAT::Table;
use xCAT::Utils;
use xCAT::TZUtils;
use Time::HiRes qw(sleep);
use xCAT::Template;
use xCAT::MsgUtils;
use xCAT::SvrUtils;
use xCAT::NodeRange;
use xCAT::Common;
use xCAT::VMCommon;
use POSIX "WNOHANG";
use Getopt::Long;
use Thread qw(yield);
use POSIX qw(WNOHANG nice);
use File::Path qw/mkpath rmtree/;
use File::Temp qw/tempdir/;
use File::Copy;
use Fcntl qw/:flock/;
use IO::Socket;    #Need name resolution

#use Data::Dumper;
Getopt::Long::Configure("bundling");
Getopt::Long::Configure("pass_through");
my @cpiopid;
our @ISA = 'xCAT::Common';

#in xCAT, the lifetime of a process ends on every request
#therefore, the lifetime of assignments to these globals as architected
#is to be cleared on every request
#my %esx_comm_pids;
my %limbonodes;     #nodes in limbo during a forced migration due to missing parent
my %hyphash;        #A data structure to hold hypervisor-wide variables (i.e. the current resource pool, virtual machine folder, connection object)
my %vcenterhash;    #A data structure to reflect the state of vCenter connectivity to hypervisors
my %vmhash;         #store per-vm info of interest
my %clusterhash;
my %hypready;       #A structure for hypervisor readiness to be tracked before proceeding to normal operations
my %running_tasks;  #A structure to track this process's pending tasks
my $output_handler; #Pointer to the function that drives results to the client
my $executerequest;
my $usehostnamesforvcenter;
my %tablecfg;       #to hold the tables
my %hostrefbynode;
my $currkey;
my $requester;
my $viavcenter;
my $viavcenterbyhyp;
my $vcenterautojoin = 1;
my $datastoreautomount = 1;
my $vcenterforceremove = 0;    #used in rmhypervisor
my $reconfigreset = 1;
my $vmwaresdkdetect = eval {
    require VMware::VIRuntime;
    VMware::VIRuntime->import();
    1;
};
my %lockhandles;

sub recursion_copy {
    my $source = shift;
    my $destination = shift;
    my $dirhandle;
    opendir($dirhandle, $source);
    my $entry;
    foreach $entry (readdir($dirhandle)) {
        if ($entry eq '.' or $entry eq '..') { next; }
        my $tempsource = "$source/$entry";
        my $tempdestination = "$destination/$entry";
        if (-d $tempsource) {
            unless (-d $tempdestination) { mkdir $tempdestination or die "failure creating directory $tempdestination, $!"; }
            recursion_copy($tempsource, $tempdestination);
        } else {
            copy($tempsource, $tempdestination) or die "failed copy from $tempsource to $tempdestination, $!";
        }
    }
}
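
# Usage sketch (illustrative only; the paths below are hypothetical, not ones
# this plugin actually uses): recursively replicate a directory tree, creating
# any missing destination directories as it descends.
#   mkpath("/tmp/esxboot-copy");    # mkpath comes from File::Path, imported above
#   recursion_copy("/install/esxi5", "/tmp/esxboot-copy");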

sub lockbyname {
    my $name = shift;
    my $lckh;
    mkpath("/tmp/xcat/locks/");
    while (-e "/tmp/xcat/locks/$name") { sleep 1; }
    open($lockhandles{$name}, ">>", "/tmp/xcat/locks/$name");
    flock($lockhandles{$name}, LOCK_EX);
}

sub unlockbyname {
    my $name = shift;
    unlink("/tmp/xcat/locks/$name");
    close($lockhandles{$name});
}
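
# Usage sketch (illustrative only): serialize a critical section across
# concurrent esx plugin invocations by lock name. "datastore-nfs" is a
# hypothetical lock name chosen for the example, not one the plugin requires.
#   lockbyname("datastore-nfs");
#   # ... mount or modify shared state ...
#   unlockbyname("datastore-nfs");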

my %guestidmap = (
    "rhel.6.*"   => "rhel6_",
    "rhel.5.*"   => "rhel5_",
    "rhel4.*"    => "rhel4_",
    "centos5.*"  => "rhel5_",
    "centos4.*"  => "rhel4_",
    "sles11.*"   => "sles11_",
    "sles10.*"   => "sles10_",
    "win2k8"     => "winLonghorn",
    "win2k8r2"   => "windows7Server",
    "win7"       => "windows7_",
    "win2k3"     => "winNetStandard",
    "imagex"     => "winNetStandard",
    "boottarget" => "otherLinux"
    #otherGuest, otherGuest64, otherLinuxGuest, otherLinux64Guest
);

sub handled_commands {
    return {
        copycd       => 'esx',
        mknetboot    => "nodetype:os=(esxi.*)",
        mkinstall    => "nodetype:os=(esxi5.*)",
        rpower       => 'nodehm:power,mgt',
        rsetboot     => 'nodehm:power,mgt',
        rmigrate     => 'nodehm:power,mgt',
        mkvm         => 'nodehm:mgt',
        rmvm         => 'nodehm:mgt',
        clonevm      => 'nodehm:mgt',
        rinv         => 'nodehm:mgt',
        chvm         => 'nodehm:mgt',
        rshutdown    => "nodetype:os=(esxi.*)",
        lsvm         => [ 'hypervisor:type', 'nodetype:os=(esx.*)' ],
        rmhypervisor => [ 'hypervisor:type', 'nodetype:os=(esx.*)' ],
        chhypervisor => [ 'hypervisor:type', 'nodetype:os=(esx.*)' ],
        #lsvm => 'nodehm:mgt', not really supported yet
    };
}
sub preprocess_request {
    my $request = shift;
    my $callback = shift;

    #if already preprocessed, go straight to request
    if ((defined($request->{_xcatpreprocessed}))
        && ($request->{_xcatpreprocessed}->[0] == 1))
    {
        return [$request];
    }
    my $username = 'root';
    my $password = '';
    my $vusername = "Administrator";
    my $vpassword = "";

    unless ($request and $request->{command} and $request->{command}->[0]) { return; }
    if ($request->{command}->[0] eq 'copycd')
    {    #don't farm out copycd
        return [$request];
    } elsif ($request->{command}->[0] eq 'mknetboot'
        or $request->{command}->[0] eq 'mkinstall') {
        return [$request];
    }
    xCAT::Common::usage_noderange($request, $callback);

    if ($request->{_xcatpreprocessed} and $request->{_xcatpreprocessed}->[0] == 1) { return [$request]; }

    # exit if preprocessed
    my @requests;
    my $noderange = $request->{node};    # array ref
    my $command = $request->{command}->[0];
    my $extraargs = $request->{arg};
    my @exargs = ($request->{arg});
    my %hyp_hash = ();
    my %cluster_hash = ();

    # Get the default vmware/vcenter credentials from the passwd table.
    my $passtab = xCAT::Table->new('passwd');
    my $tmp;
    if ($passtab) {
        ($tmp) = $passtab->getAttribs({ 'key' => 'vmware' }, 'username', 'password');
        if (defined($tmp)) {
            $username = $tmp->{username};
            $password = $tmp->{password};
        }
        ($tmp) = $passtab->getAttribs({ 'key' => 'vcenter' }, 'username', 'password');
        if (defined($tmp)) {
            $vusername = $tmp->{username};
            $vpassword = $tmp->{password};
        }
    }
    my $vmtab = xCAT::Table->new("vm");
    unless ($vmtab) {
        $callback->({ data => ["Cannot open vm table"] });
        $request = {};
        return;
    }
    my $vmtabhash = $vmtab->getNodesAttribs($noderange, [ 'host', 'migrationdest' ]);
    foreach my $node (@$noderange) {
        if ($command eq "rmhypervisor" or $command eq 'lsvm' or $command eq 'rshutdown' or $command eq "chhypervisor") {
            $hyp_hash{$node}{nodes} = [$node];
        } else {
            my $ent = $vmtabhash->{$node}->[0];
            if (defined($ent->{host})) {
                push @{ $hyp_hash{ $ent->{host} }{nodes} }, $node;
            } elsif (defined($ent->{migrationdest})) {
                $cluster_hash{ $ent->{migrationdest} }->{nodes}->{$node} = 1;
            } else {
                xCAT::SvrUtils::sendmsg([ 1, ": no host or cluster defined for guest" ], $callback, $node);
            }
        }
    }

    # find service nodes for the hypervisors and guests
    # build an individual request for each service node
    my $service = "xcat";
    my @hyps = keys(%hyp_hash);
    my %targethyps;
    if ($command eq 'rmigrate' and (scalar @{$extraargs} >= 1)) {
        @ARGV = @{$extraargs};
        my $offline;
        my $junk;
        GetOptions(
            "f"   => \$offline,
            "s=s" => \$junk    #we don't care about it, but suck up nfs:// targets so they don't get added
        );
        my $dsthyp = $ARGV[0];
        if ($dsthyp) {
            push @hyps, $dsthyp;
            $targethyps{$dsthyp} = 1;
        }
    }
    #TODO: per hypervisor table password lookup
    my @allnodes;
    push @allnodes, @hyps;
    push @allnodes, @$noderange;
    my $sn = xCAT::Utils->get_ServiceNode(\@allnodes, $service, "MN");

    #vmtabhash was from when we had vm.host do double duty for hypervisor data
    #$vmtabhash = $vmtab->getNodesAttribs(\@hyps,['host']);
    #We now use hypervisor fields to be unambiguous
    my $hyptab = xCAT::Table->new('hypervisor');
    my $hyptabhash = {};
    if ($hyptab) {
        $hyptabhash = $hyptab->getNodesAttribs(\@hyps, ['mgr']);
    }

    # build each request for each service node
    foreach my $snkey (keys %$sn) {
        my $reqcopy = {%$request};
        $reqcopy->{'_xcatdest'} = $snkey;
        $reqcopy->{_xcatpreprocessed}->[0] = 1;
        my $hyps1 = $sn->{$snkey};
        my @moreinfo = ();
        my @nodes = ();
        foreach (@$hyps1) {    #This preserves the constructed data to avoid redundant table lookup
            my $cfgdata;
            if (not $targethyps{$_} and not $hyp_hash{$_}) {    #a vm, skip it
                next;
            } elsif ($hyp_hash{$_}{nodes}) {
                push @nodes, @{ $hyp_hash{$_}{nodes} };
                $cfgdata = "[$_][" . join(',', @{ $hyp_hash{$_}{nodes} }) . "][$username][$password][$vusername][$vpassword]";    #TODO: not use vm.host?
            } else {
                $cfgdata = "[$_][][$username][$password][$vusername][$vpassword]";    #TODO: not use vm.host?
            }
            if (defined $hyptabhash->{$_}->[0]->{mgr}) {
                $cfgdata .= "[" . $hyptabhash->{$_}->[0]->{mgr} . "]";
            } else {
                $cfgdata .= "[]";
            }
            push @moreinfo, $cfgdata;    #"[$_][".join(',',@{$hyp_hash{$_}{nodes}})."][$username][$password]";
        }
        foreach (keys %cluster_hash) {
            my $cluster;
            my $vcenter;
            if (/@/) {
                ($cluster, $vcenter) = split /@/, $_, 2;
            } else {
                die "TODO: implement default vcenter (for now, user, do vm.migrationdest=cluster" . '@' . "vcentername)";
            }
            push @moreinfo, "[CLUSTER:$cluster][" . join(',', keys %{ $cluster_hash{$_}->{nodes} }) . "][$username][$password][$vusername][$vpassword][$vcenter]";
        }
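
        # Each moreinfo entry is a bracketed record of the form
        #   [<hypervisor or CLUSTER:name>][<node,node,...>][<user>][<password>][<vcenter user>][<vcenter password>][<hypervisor.mgr or vcenter name>]
        # For example (purely illustrative values):
        #   [hv01][vm1,vm2][root][secret][Administrator][vcpass][vcenter01]
        # process_request() later parses these seven fields back out with a regex.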
        if (scalar @nodes) {
            $reqcopy->{node} = \@nodes;
        }

        #print "nodes=@nodes\n";
        $reqcopy->{moreinfo} = \@moreinfo;
        push @requests, $reqcopy;
    }
    return \@requests;
}
sub process_request {
    #$SIG{INT} = $SIG{TERM} = sub{
    #    foreach (keys %esx_comm_pids){
    #        kill 2,$_;
    #    }
    #    exit 0;
    #};
    my $request = shift;
    $output_handler = shift;
    $executerequest = shift;
    if ($request->{_xcat_authname}->[0]) {
        $requester = $request->{_xcat_authname}->[0];
    }
    my $level = shift;
    my $distname = undef;
    my $arch = undef;
    my $path = undef;
    my $command = $request->{command}->[0];

    #The first segment is fulfilling the role of this plugin as
    #a hypervisor provisioning plugin (akin to anaconda, windows, sles plugins)
    if ($command eq 'copycd') {
        return copycd($request, $executerequest);
    } elsif ($command eq 'mkinstall') {
        return mkinstall($request, $executerequest);
    } elsif ($command eq 'mknetboot') {
        return mknetboot($request, $executerequest);
    }

    #From here on out, code for managing guests under VMware
    #Detect whether or not the VMware SDK is available on this specific system
    unless ($vmwaresdkdetect) {
        $vmwaresdkdetect = eval {
            require VMware::VIRuntime;
            VMware::VIRuntime->import();
            1;
        };
    }
    unless ($vmwaresdkdetect) {
        xCAT::SvrUtils::sendmsg([ 1, "VMware SDK required for operation, but not installed" ], $output_handler);
        return;
    }
    my $moreinfo;
    my $noderange = $request->{node};
    xCAT::VMCommon::grab_table_data($noderange, \%tablecfg, $output_handler);
    my @exargs;
    unless ($command) {
        return;    # Empty request
    }
    if (ref($request->{arg})) {
        @exargs = @{ $request->{arg} };
    } else {
        @exargs = ($request->{arg});
    }

    #my $sitetab = xCAT::Table->new('site');
    #if($sitetab){
    #(my $ref) = $sitetab->getAttribs({key => 'usehostnamesforvcenter'}, 'value');
    my @entries = xCAT::Utils->get_site_attribute("usehostnamesforvcenter");
    my $t_entry = $entries[0];
    if (defined($t_entry)) {
        $usehostnamesforvcenter = $t_entry;
    }

    #($ref) = $sitetab->getAttribs({key => 'vcenterautojoin'}, 'value');
    @entries = xCAT::Utils->get_site_attribute("vcenterautojoin");
    $t_entry = $entries[0];
    if (defined($t_entry)) {
        $vcenterautojoin = $t_entry;
        if ($vcenterautojoin =~ /^n/ or $vcenterautojoin =~ /^dis/) {
            $vcenterautojoin = 0;
        }
    }

    #($ref) = $sitetab->getAttribs({key => 'vmwaredatastoreautomount'}, 'value');
    @entries = xCAT::Utils->get_site_attribute("vmwaredatastoreautomount");
    $t_entry = $entries[0];
    if (defined($t_entry)) {
        $datastoreautomount = $t_entry;
        if ($datastoreautomount =~ /^n/ or $datastoreautomount =~ /^dis/) {
            $datastoreautomount = 0;
        }
    }

    #($ref) = $sitetab->getAttribs({key => 'vmwarereconfigonpower'},'value');
    @entries = xCAT::Utils->get_site_attribute("vmwarereconfigonpower");
    $t_entry = $entries[0];
    if (defined($t_entry)) {
        $reconfigreset = $t_entry;
        if ($reconfigreset =~ /^(n|d)/i) {    #if no or disable, skip it
            $reconfigreset = 0;
        }
    }

    # }
    if ($request->{moreinfo}) { $moreinfo = $request->{moreinfo}; }
    else { $moreinfo = build_more_info($noderange, $output_handler); }
    foreach my $info (@$moreinfo) {
        $info =~ /^\[(.*?)\]\[(.*?)\]\[(.*?)\]\[(.*?)\]\[(.*?)\]\[(.*?)\]\[(.*?)\]/;
        my $hyp = $1;
        my @nodes = split(',', $2);
        my $username = $3;
        my $password = $4;
        my $tmpvcname = $7;
        my $tmpvcuname = $5;
        my $tmpvcpass = $6;
        if ($hyp =~ /^CLUSTER:/) {    #a cluster, not a host
            $hyp =~ s/^CLUSTER://;
            $clusterhash{$hyp}->{vcenter}->{name} = $tmpvcname;
            $clusterhash{$hyp}->{vcenter}->{username} = $tmpvcuname;
            $clusterhash{$hyp}->{vcenter}->{password} = $tmpvcpass;
            foreach (@nodes) {
                $clusterhash{$hyp}->{nodes}->{$_} = 1;
            }
            next;
        }
        $hyphash{$hyp}->{vcenter}->{name} = $tmpvcname;
        $hyphash{$hyp}->{vcenter}->{username} = $tmpvcuname;
        $hyphash{$hyp}->{vcenter}->{password} = $tmpvcpass;
        $hyphash{$hyp}->{username} = $username;    # $nodeid;
        $hyphash{$hyp}->{password} = $password;    # $nodeid;
        unless ($hyphash{$hyp}->{vcenter}->{password}) {
            $hyphash{$hyp}->{vcenter}->{password} = "";
        }
        my $ent;
        for (my $i = 0 ; $i < @nodes ; $i++) {
            if ($command eq 'rmigrate' and grep /-f/, @exargs) {    #offline migration
                $hyphash{$hyp}->{offline} = 1;    #if it is migrate and it has nodes, it is a source hypervisor apt to be offline
                  #this will hint to relevant code to operate under the assumption of a
                  #downed hypervisor source
                  #note this will make dangerous assumptions, it will make a very minimal attempt
                  #to operate normally, but really should only be called if the source is down and
                  #fenced (i.e. storage, network, or turned off and stateless)
            }
            my $node = $nodes[$i];
            #my $nodeid = $ids[$i];
            $hyphash{$hyp}->{nodes}->{$node} = 1;    # $nodeid;
        }
    }
    my $hyptab = xCAT::Table->new('hypervisor', create => 0);
    if ($hyptab) {
        my @hyps = keys %hyphash;
        $tablecfg{hypervisor} = $hyptab->getNodesAttribs(\@hyps, [ 'mgr', 'netmap', 'defaultnet', 'cluster', 'preferdirect', 'datacenter' ]);
    }
    my $hoststab = xCAT::Table->new('hosts', create => 0);
    if ($hoststab) {
        my @hyps = keys %hyphash;
        $tablecfg{hosts} = $hoststab->getNodesAttribs(\@hyps, ['hostnames']);
    }

    #my $children = 0;
    #my $vmmaxp = 84;
    #$SIG{CHLD} = sub { my $cpid; while ($cpid = waitpid(-1, WNOHANG) > 0) { delete $esx_comm_pids{$cpid}; $children--; } };
    $viavcenter = 0;
    if ($command eq 'rmigrate' or $command eq 'rmhypervisor') {    #Only use vCenter when required, fewer prereqs
        $viavcenter = 1;
    }
    if ($command eq 'rmhypervisor' and grep /-f/, @exargs) {    #force remove of hypervisor
        $vcenterforceremove = 1;
    }
    my $keytab = xCAT::Table->new('prodkey');
    if ($keytab) {
        my @hypes = keys %hyphash;
        $tablecfg{prodkey} = $keytab->getNodesAttribs(\@hypes, [qw/product key/]);
    }
    my $hyp;
    my %needvcentervalidation;
    my $cluster;
    foreach $cluster (keys %clusterhash) {
        my $vcenter = $clusterhash{$cluster}->{vcenter}->{name};
        unless ($vcenterhash{$vcenter}->{conn}) {
            eval {
                $vcenterhash{$vcenter}->{conn} = Vim->new(service_url => "https://$vcenter/sdk");
                $vcenterhash{$vcenter}->{conn}->login(user_name => $clusterhash{$cluster}->{vcenter}->{username},
                    password => $clusterhash{$cluster}->{vcenter}->{password});
            };
            if ($@) {
                $vcenterhash{$vcenter}->{conn} = undef;
                xCAT::SvrUtils::sendmsg([ 1, "Unable to reach $vcenter vCenter server to manage cluster $cluster: $@" ], $output_handler);
                next;
            }
            my $clusternode;
        }
        $clusterhash{$cluster}->{conn} = $vcenterhash{$vcenter}->{conn};
        foreach my $clusternode (keys %{ $clusterhash{$cluster}->{nodes} }) {
            $vmhash{$clusternode}->{conn} = $vcenterhash{$vcenter}->{conn};
        }
    }
    foreach $hyp (sort (keys %hyphash)) {
        #if($pid == 0){
        if ($viavcenter or (defined $tablecfg{hypervisor}->{$hyp}->[0]->{mgr} and not $tablecfg{hypervisor}->{$hyp}->[0]->{preferdirect})) {
            $viavcenterbyhyp->{$hyp} = 1;
            $hypready{$hyp} = 0;    #This hypervisor requires a flag be set to signify vCenter saneness before proceeding
            my $vcenter = $hyphash{$hyp}->{vcenter}->{name};
            unless ($vcenterhash{$vcenter}->{conn}) {
                eval {
                    $vcenterhash{$vcenter}->{conn} =
                      Vim->new(service_url => "https://$vcenter/sdk");
                    $vcenterhash{$vcenter}->{conn}->login(
                        user_name => $hyphash{$hyp}->{vcenter}->{username},
                        password  => $hyphash{$hyp}->{vcenter}->{password}
                    );
                };
                if ($@) {
                    $vcenterhash{$vcenter}->{conn} = undef;
                    xCAT::SvrUtils::sendmsg([ 1, "Unable to reach $vcenter vCenter server to manage $hyp: $@" ], $output_handler);
                    next;
                }
            }
            my $hypnode;
            foreach $hypnode (keys %{ $hyphash{$hyp}->{nodes} }) {
                $vmhash{$hypnode}->{conn} = $vcenterhash{ $hyphash{$hyp}->{vcenter}->{name} }->{conn};
            }
            $hyphash{$hyp}->{conn} = $vcenterhash{ $hyphash{$hyp}->{vcenter}->{name} }->{conn};
            $hyphash{$hyp}->{vcenter}->{conn} = $vcenterhash{ $hyphash{$hyp}->{vcenter}->{name} }->{conn};
            $needvcentervalidation{$hyp} = $vcenter;
            $vcenterhash{$vcenter}->{allhyps}->{$hyp} = 1;
        } else {
            eval {
                $hyphash{$hyp}->{conn} = Vim->new(service_url => "https://$hyp/sdk");
                $hyphash{$hyp}->{conn}->login(user_name => $hyphash{$hyp}->{username}, password => $hyphash{$hyp}->{password});
            };
            if ($@) {
                $hyphash{$hyp}->{conn} = undef;
                xCAT::SvrUtils::sendmsg([ 1, "Unable to reach $hyp to perform operation due to $@" ], $output_handler);
                $hypready{$hyp} = -1;
                next;
            }
            my $localnode;
            foreach $localnode (keys %{ $hyphash{$hyp}->{nodes} }) {
                $vmhash{$localnode}->{conn} = $hyphash{$hyp}->{conn};
            }
            validate_licenses($hyp);
        }
        #}else{
        #    $esx_comm_pids{$pid} = 1;
        #}
    }
    foreach $hyp (keys %needvcentervalidation) {
        my $vcenter = $needvcentervalidation{$hyp};
        if (not defined $vcenterhash{$vcenter}->{hostviews}) {
            populate_vcenter_hostviews($vcenter);
        }
        if (validate_vcenter_prereqs($hyp, \&declare_ready, {
                    hyp     => $hyp,
                    vcenter => $vcenter
                }) eq "failed") {
            $hypready{$hyp} = -1;
        }
    }
    while (grep { $_ == 0 } values %hypready) {
        wait_for_tasks();
        sleep(1);    #We'll check back in every second. Unfortunately, we have to poll since we are in web service land
    }
    my @badhypes;
    if (grep { $_ == -1 } values %hypready) {
        foreach (keys %hypready) {
            if ($hypready{$_} == -1) {
                unless ($hyphash{$_}->{offline}) {
                    push @badhypes, $_;
                }
                my @relevant_nodes = sort (keys %{ $hyphash{$_}->{nodes} });
                my $sadhypervisor = $_;
                foreach (@relevant_nodes) {
                    if ($command eq "rmigrate" and grep /-f/, @exargs) {
                        $limbonodes{$_} = $needvcentervalidation{$sadhypervisor};
                    } else {
                        xCAT::SvrUtils::sendmsg([ 1, ": hypervisor unreachable" ], $output_handler, $_);
                    }
                    if ($command eq "rpower" and grep /stat/, @exargs) { $limbonodes{$_} = $needvcentervalidation{$sadhypervisor}; }    #try to stat power anyway through vcenter of interest...
                }
                delete $hyphash{$_};
            }
        }
        if (@badhypes) {
            xCAT::SvrUtils::sendmsg([ 1, ": The following hypervisors failed to become ready for the operation: " . join(',', @badhypes) ], $output_handler);
        }
    }
    do_cmd($command, @exargs);
    foreach (@badhypes) { delete $hyphash{$_}; }
    foreach my $vm (sort (keys %vmhash)) {
        $vmhash{$vm}->{conn}->logout();
    }
}
sub validate_licenses {
    my $hyp = shift;
    my $conn = $hyphash{$hyp}->{conn};    #This can't possibly be called via a cluster stack, so hyphash is appropriate here
    unless ($tablecfg{prodkey}->{$hyp}) {    #if no license specified, no-op
        return;
    }
    my $hv = get_hostview(hypname => $hyp, conn => $conn, properties => [ 'configManager', 'name' ]);
    my $lm = $conn->get_view(mo_ref => $hv->configManager->licenseManager);
    my @licenses;
    foreach (@{ $lm->licenses }) {
        push @licenses, uc($_->licenseKey);
    }
    my @newlicenses;
    foreach (@{ $tablecfg{prodkey}->{$hyp} }) {
        if (defined($_->{product}) and $_->{product} eq 'esx') {
            my $key = uc($_->{key});
            unless (grep /$key/, @licenses) {
                push @newlicenses, $key;
            }
        }
    }
    foreach (@newlicenses) {
        $lm->UpdateLicense(licenseKey => $_);
    }
}
sub do_cmd {
    my $command = shift;
    my @exargs = @_;
    if ($command eq 'rpower') {
        generic_vm_operation([ 'config.name', 'config.guestId', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'runtime.powerState', 'runtime.host' ], \&power, @exargs);
    } elsif ($command eq 'rmvm') {
        generic_vm_operation([ 'config.name', 'runtime.powerState', 'runtime.host' ], \&rmvm, @exargs);
    } elsif ($command eq 'rsetboot') {
        generic_vm_operation([ 'config.name', 'runtime.host' ], \&setboot, @exargs);
    } elsif ($command eq 'rinv') {
        generic_vm_operation([ 'config.name', 'config', 'runtime.host', 'layoutEx' ], \&inv, @exargs);
    } elsif ($command eq 'rmhypervisor') {
        generic_hyp_operation(\&rmhypervisor, @exargs);
    } elsif ($command eq 'rshutdown') {
        generic_hyp_operation(\&rshutdown, @exargs);
    } elsif ($command eq 'chhypervisor') {
        generic_hyp_operation(\&chhypervisor, @exargs);
    } elsif ($command eq 'lsvm') {
        generic_hyp_operation(\&lsvm, @exargs);
    } elsif ($command eq 'clonevm') {
        generic_hyp_operation(\&clonevms, @exargs);
    } elsif ($command eq 'mkvm') {
        generic_hyp_operation(\&mkvms, @exargs);
    } elsif ($command eq 'chvm') {
        generic_vm_operation([ 'config.name', 'config', 'runtime.host' ], \&chvm, @exargs);
        #generic_hyp_operation(\&chvm,@exargs);
    } elsif ($command eq 'rmigrate') {    #Technically, on a host view, but vcenter path is 'weirder'
        generic_hyp_operation(\&migrate, @exargs);
    }
    wait_for_tasks();
    if ($command eq 'clonevm') {    #TODO: unconditional, remove mkvms hosted copy
        my @dhcpnodes;
        foreach (keys %{ $tablecfg{dhcpneeded} }) {
            push @dhcpnodes, $_;
            delete $tablecfg{dhcpneeded}->{$_};
        }
        unless ($::XCATSITEVALS{'dhcpsetup'} and ($::XCATSITEVALS{'dhcpsetup'} =~ /^n/i or $::XCATSITEVALS{'dhcpsetup'} =~ /^d/i or $::XCATSITEVALS{'dhcpsetup'} eq '0')) {
            $executerequest->({ command => ['makedhcp'], node => \@dhcpnodes });
        }
    }
}

#inventory request for esx
sub inv {
    my %args = @_;
    my $node = $args{node};
    my $hyp = $args{hyp};
    if (not defined $args{vmview}) {    #attempt one refresh
        $args{vmview} = $vmhash{$node}->{conn}->find_entity_view(view_type => 'VirtualMachine', properties => [ 'config.name', 'runtime.powerState' ], filter => { name => $node });
        if (not defined $args{vmview}) {
            xCAT::SvrUtils::sendmsg([ 1, "VM does not appear to exist" ], $output_handler, $node);
            return;
        }
    }
    if (not $args{vmview}->{config}) {
        xCAT::SvrUtils::sendmsg([ 1, "VM is in an invalid state" ], $output_handler, $node);
        return;
    }
    @ARGV = @{ $args{exargs} };
    require Getopt::Long;
    my $tableUpdate;
    my $rc = GetOptions(
        't' => \$tableUpdate,
    );
    $SIG{__WARN__} = 'DEFAULT';
    if (@ARGV > 1) {
        xCAT::SvrUtils::sendmsg("Invalid arguments: @ARGV", $output_handler);
        return;
    }
    if (!$rc) {
        return;
    }
    my $vmview = $args{vmview};
    my $moref = $vmview->{mo_ref}->value;
    xCAT::SvrUtils::sendmsg("Managed Object Reference: $moref", $output_handler, $node);
    my $uuid = $vmview->config->uuid;
    $uuid =~ s/(..)(..)(..)(..)-(..)(..)-(..)(..)/$4$3$2$1-$6$5-$8$7/;
    xCAT::SvrUtils::sendmsg("UUID/GUID: $uuid", $output_handler, $node);
    my $cpuCount = $vmview->config->hardware->numCPU;
    xCAT::SvrUtils::sendmsg("CPUs: $cpuCount", $output_handler, $node);
    my $memory = $vmview->config->hardware->memoryMB;
    xCAT::SvrUtils::sendmsg("Memory: $memory MB", $output_handler, $node);
    my %updatehash = (cpus => $cpuCount, memory => $memory);

    my $devices = $vmview->config->hardware->device;
    my $label;
    my $size;
    my $fileName;
    my $device;
    if ($tableUpdate and $hyp) {
        validate_datastore_prereqs([$node], $hyp);    #need datastoremaps to verify names...
    }
    my %vmstorageurls;
    foreach $device (@$devices) {
        $label = $device->deviceInfo->label;
        if ($label =~ /^Hard disk/) {
            $label .= " (d" . $device->controllerKey . ":" . $device->unitNumber . ")";
            $size = $device->capacityInKB / 1024;
            $fileName = $device->backing->fileName;
            $output_handler->({
                    node => {
                        name => $node,
                        data => {
                            desc     => $label,
                            contents => "$size MB @ $fileName"
                        }
                    }
            });

            #if ($tableUpdate) {
            #    $fileName =~ /\[([^\]]+)\]/;
            #    $vmstorageurls{$hyphash{$hyp}->{datastoreurlmap}->{$1}}=1;
            #}
        } elsif ($label =~ /Network/) {
            xCAT::SvrUtils::sendmsg("$label: " . $device->macAddress, $output_handler, $node);
        }
    }
    if ($tableUpdate) {
        my $cfgdatastore;
        foreach (@{ $vmview->layoutEx->file }) {
            #TODO, track ALL layoutEx->file....
            if ($_->type eq 'config') {
                $_->name =~ /\[([^\]]+)\]/;
                $cfgdatastore = $hyphash{$hyp}->{datastoreurlmap}->{$1};
                last;
            }
        }
        my $cfgkey;
        if ($tablecfg{vm}->{$node}->[0]->{cfgstore}) {    #check the config file explicitly, ignore the rest
            $cfgkey = 'cfgstore';
        } elsif ($tablecfg{vm}->{$node}->[0]->{storage}) {    #check the config file explicitly, ignore the rest
            $cfgkey = 'storage';
        }
        my $configuration = $tablecfg{vm}->{$node}->[0]->{$cfgkey};    #TODO: prune urls that map to no layoutEx->file entries anymore
        my $configappend = $configuration;
        $configappend =~ s/^[^,=]*//;
        $tablecfg{vm}->{$node}->[0]->{$cfgkey} =~ m!nfs://([^/]+)/!;
        my $tablecfgserver = $1;
        my $cfgserver = inet_aton($tablecfgserver);
        if ($cfgserver) {
            $cfgserver = inet_ntoa($cfgserver);    #get the IP address (TODO: really need to wrap getaddrinfo this handily...)
            my $cfgurl = $tablecfg{vm}->{$node}->[0]->{$cfgkey};
            $cfgurl =~ s/$tablecfgserver/$cfgserver/;
            if ($cfgurl ne $cfgdatastore) {
                $updatehash{$cfgkey} = $cfgdatastore . $configappend;
            }
        }
    }
    if ($tableUpdate) {
        my $vm = xCAT::Table->new('vm', -create => 1);
        $vm->setNodeAttribs($node, \%updatehash);
    }
}

#changes the memory, number of cpus and device size
#can also add, resize and remove disks
sub chvm {
    my %args = @_;
    my $node = $args{node};
    my $hyp = $args{hyp};
    if (not defined $args{vmview}) {    #attempt one refresh
        $args{vmview} = $vmhash{$node}->{conn}->find_entity_view(view_type => 'VirtualMachine',
            properties => [ 'config.name', 'runtime.powerState' ],
            filter     => { name => $node });
        if (not defined $args{vmview}) {
            xCAT::SvrUtils::sendmsg([ 1, "VM does not appear to exist" ], $output_handler, $node);
            return;
        }
    }
    @ARGV = @{ $args{exargs} };
    my @deregister;
    my @purge;
    my @add;
    my %resize;
    my $cpuCount;
    my $memory;
    my $vmview = $args{vmview};
    require Getopt::Long;
    $SIG{__WARN__} = sub {
        xCAT::SvrUtils::sendmsg([ 1, "Could not parse options, " . shift() ], $output_handler);
    };
    my @otherparams;
    my $cdrom;
    my $eject;
    my $rc = GetOptions(
        "d=s"               => \@deregister,
        "p=s"               => \@purge,
        "a=s"               => \@add,
        "o=s"               => \@otherparams,
        "resize=s%"         => \%resize,
        "optical|cdrom|c=s" => \$cdrom,
        "eject"             => \$eject,
        "cpus=s"            => \$cpuCount,
        "mem=s"             => \$memory
    );
    $SIG{__WARN__} = 'DEFAULT';
    if (@ARGV) {
        xCAT::SvrUtils::sendmsg("Invalid arguments: @ARGV", $output_handler);
        return;
    }
    if (!$rc) {
        return;
    }
    #use Data::Dumper;
    #xCAT::SvrUtils::sendmsg("dereg = ".Dumper(\@deregister));
    #xCAT::SvrUtils::sendmsg("purge = ".Dumper(\@purge));
    #xCAT::SvrUtils::sendmsg("add = ".Dumper(\@add));
    #xCAT::SvrUtils::sendmsg("resize = ".Dumper(\%resize));
    #xCAT::SvrUtils::sendmsg("cpus = $cpuCount");
    #xCAT::SvrUtils::sendmsg("mem = ".getUnits($memory,"K",1024));
    my %conargs;
    if ($cpuCount) {
        if ($cpuCount =~ /^\+(\d+)/) {
            $cpuCount = $vmview->config->hardware->numCPU + $1;
        } elsif ($cpuCount =~ /^-(\d+)/) {
            $cpuCount = $vmview->config->hardware->numCPU - $1;
        }
        $conargs{numCPUs} = $cpuCount;
    }
    if ($memory) {
        if ($memory =~ /^\+(.+)/) {
            $conargs{memoryMB} = $vmview->config->hardware->memoryMB + getUnits($1, "M", 1048576);
        } elsif ($memory =~ /^-(\d+)/) {
            $conargs{memoryMB} = $vmview->config->hardware->memoryMB - getUnits($1, "M", 1048576);
        } else {
            $conargs{memoryMB} = getUnits($memory, "M", 1048576);
        }
    }
    my $disk;
    my $devices = $vmview->config->hardware->device;
    my $label;
    my $device;
    my $cmdLabel;
    my $newSize;
    my @devChanges;
    if (@deregister) {
        for $disk (@deregister) {
            $device = getDiskByLabel($disk, $devices);
            unless ($device) {
                xCAT::SvrUtils::sendmsg([ 1, "Disk: $disk does not exist" ], $output_handler, $node);
                return;
            }

            #xCAT::SvrUtils::sendmsg(Dumper($device));
            push @devChanges, VirtualDeviceConfigSpec->new(
                device    => $device,
                operation => VirtualDeviceConfigSpecOperation->new('remove'));
        }
    }
    if (@purge) {
        for $disk (@purge) {
            $device = getDiskByLabel($disk, $devices);
            unless ($device) {
                xCAT::SvrUtils::sendmsg([ 1, "Disk: $disk does not exist" ], $output_handler, $node);
                return;
            }

            #xCAT::SvrUtils::sendmsg(Dumper($device));
            push @devChanges, VirtualDeviceConfigSpec->new(
                device        => $device,
                operation     => VirtualDeviceConfigSpecOperation->new('remove'),
                fileOperation => VirtualDeviceConfigSpecFileOperation->new('destroy'));
        }
    }
    if (@add) {
        my $addSizes = join(',', @add);
        my $scsiCont;
        my $scsiUnit;
        my $ideCont;
        my $ideUnit;
        my $label;
        my $idefull = 0;
        my $scsifull = 0;
        foreach $device (@$devices) {
            $label = $device->deviceInfo->label;
            if ($label =~ /^SCSI controller/) {
                my $tmpu = getAvailUnit($device->{key}, $devices, maxnum => 15);
                if ($tmpu > 0) {
                    $scsiCont = $device;
                    $scsiUnit = $tmpu;
                } else {
                    $scsifull = 1;
                }
                #ignore SCSI controllers that are full; a problem still remains if trying to add across two controllers in one go
            }
            if ($label =~ /^IDE/ and not $ideCont) {
                my $tmpu = getAvailUnit($device->{key}, $devices, maxnum => 1);
                if ($tmpu >= 0) {
                    $ideCont = $device;
                    $ideUnit = $tmpu;
                } elsif ($device->{key} == 201) {
                    $idefull = 1;
                }
            }
        }
        unless ($hyphash{$hyp}->{datastoremap}) { validate_datastore_prereqs([], $hyp); }
        push @devChanges, create_storage_devs($node, $hyphash{$hyp}->{datastoremap}, $addSizes, $scsiCont, $scsiUnit, $ideCont, $ideUnit, $devices, idefull => $idefull, scsifull => $scsifull);
    }

    if ($cdrom or $eject) {
        my $opticalbackingif;
        my $opticalconnectable;
        if ($cdrom) {
            my $storageurl;
            if ($cdrom =~ m!://!) {
                $storageurl = $cdrom;
                $storageurl =~ s!/[^/]*\z!!;
                unless (validate_datastore_prereqs([], $hyp, { $storageurl => [$node] })) {
                    xCAT::SvrUtils::sendmsg([ 1, "Unable to find/mount datastore holding $cdrom" ], $output_handler, $node);
                    return;
                }
                $cdrom =~ s!.*/!!;
            } else {
                $storageurl = $tablecfg{vm}->{$node}->[0]->{storage};
                $storageurl =~ s/=.*//;
                $storageurl =~ s/.*,//;
                $storageurl =~ s/\/\z//;
            }
            $opticalbackingif = VirtualCdromIsoBackingInfo->new(fileName => "[" . $hyphash{$hyp}->{datastoremap}->{$storageurl} . "] $cdrom");
            $opticalconnectable = VirtualDeviceConnectInfo->new(startConnected => 1, allowGuestControl => 1, connected => 1);
        } elsif ($eject) {
            $opticalbackingif = VirtualCdromRemoteAtapiBackingInfo->new(deviceName => "");
            $opticalconnectable = VirtualDeviceConnectInfo->new(startConnected => 0, allowGuestControl => 1, connected => 0);
        }
        my $oldcd;
        foreach my $dev (@$devices) {
            if ($dev->deviceInfo->label eq "CD/DVD drive 1") {
                $oldcd = $dev;
                last;
            }
        }
        unless ($oldcd) {
            if ($cdrom) {
                xCAT::SvrUtils::sendmsg([ 1, "Unable to find Optical drive in VM to insert ISO image" ], $output_handler, $node);
            } else {
                xCAT::SvrUtils::sendmsg([ 1, "Unable to find Optical drive in VM to perform eject" ], $output_handler, $node);
            }
            return;
        }
        my $newDevice = VirtualCdrom->new(backing => $opticalbackingif,
            key           => $oldcd->key,
            controllerKey => 201,
            unitNumber    => 0,
            connectable   => $opticalconnectable,
        );
        push @devChanges, VirtualDeviceConfigSpec->new(
            device    => $newDevice,
            operation => VirtualDeviceConfigSpecOperation->new('edit'));
    }
    if (%resize) {
        while (my ($key, $value) = each(%resize)) {
            my @drives = split(/,/, $key);
            for my $device (@drives) {
                my $disk = $device;
                $device = getDiskByLabel($disk, $devices);
                unless ($device) {
                    xCAT::SvrUtils::sendmsg([ 1, "Disk: $disk does not exist" ], $output_handler, $node);
                    return;
                }
                if ($value =~ /^\+(.+)/) {
                    $value = $device->capacityInKB + getUnits($1, "G", 1024);
                } else {
                    $value = getUnits($value, "G", 1024);
                }
                my $newDevice = VirtualDisk->new(deviceInfo => $device->deviceInfo,
                    key           => $device->key,
                    controllerKey => $device->controllerKey,
                    unitNumber    => $device->unitNumber,
                    deviceInfo    => $device->deviceInfo,
                    backing       => $device->backing,
                    capacityInKB  => $value);
                push @devChanges, VirtualDeviceConfigSpec->new(
                    device    => $newDevice,
                    operation => VirtualDeviceConfigSpecOperation->new('edit'));
            }
        }
    }
    if (@devChanges) {
        $conargs{deviceChange} = \@devChanges;
    }
    if (@otherparams) {
        my $key;
        my $value;
        my @optionvals;
        foreach (@otherparams) {
            ($key, $value) = split /=/;
            unless ($key) {
                xCAT::SvrUtils::sendmsg([ 1, "Invalid format for other parameter specification" ], $output_handler, $node);
                return;
            }
            if ($value) {
                push @optionvals, OptionValue->new(key => $key, value => $value);
            } else {
                push @optionvals, OptionValue->new(key => $key);    #the api doc says this is *supposed* to delete a key, don't think it works though, e.g. http://communities.vmware.com/message/1602644
            }
        }
        $conargs{extraConfig} = \@optionvals;
    }
    my $reconfigspec = VirtualMachineConfigSpec->new(%conargs);

    #xCAT::SvrUtils::sendmsg("reconfigspec = ".Dumper($reconfigspec));
    my $task = $vmview->ReconfigVM_Task(spec => $reconfigspec);
    $running_tasks{$task}->{task} = $task;
    $running_tasks{$task}->{callback} = \&chvm_task_callback;
    $running_tasks{$task}->{hyp} = $hyp;
    $running_tasks{$task}->{data} = { node => $node, successtext => "node successfully changed", cpus => $cpuCount, mem => $memory };
}

sub getUsedUnits {
    my $contKey = shift;
    my $devices = shift;
    my %usedids;
    $usedids{7} = 1;
    $usedids{'7'} = 1;    #TODO: figure out which of these is redundant, the string or the number variant
    for my $device (@$devices) {
        if ($device->{controllerKey} eq $contKey) {
            $usedids{ $device->{unitNumber} } = 1;
        }
    }
    return \%usedids;
}

sub getAvailUnit {
    my $contKey = shift;
    my $devices = shift;
    my %args = @_;
    my $maxunit = -1;
    if (defined $args{maxnum}) {
        $maxunit = $args{maxnum};
    }
    my %usedids;
    $usedids{7} = 1;
    $usedids{'7'} = 1;    #TODO: figure out which of these is redundant, the string or the number variant
    for my $device (@$devices) {
        if ($device->{controllerKey} eq $contKey) {
            $usedids{ $device->{unitNumber} } = 1;
        }
    }
    my $highestUnit = 0;
    while ($usedids{$highestUnit}) {
        if ($highestUnit == $maxunit) {
            return -1;
        }
        $highestUnit++;
    }
    return $highestUnit;
}
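
# Usage sketch (illustrative): find the next free unit number on a controller,
# treating unit 7 as reserved and capping at 15 units; -1 means the controller
# is full. $somecontroller and $devices stand in for a controller device and
# the VM's device list already gathered by the caller, as in the @add path of
# chvm() above.
#   my $unit = getAvailUnit($somecontroller->{key}, $devices, maxnum => 15);
#   if ($unit < 0) { warn "no free unit on this controller"; }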

#given a device list from a vm and a label for a hard disk, returns the device object
sub getDiskByLabel {
    my $cmdLabel = shift;
    my $devices = shift;
    my $device;
    my $label;
    $cmdLabel = commandLabel($cmdLabel);
    foreach $device (@$devices) {
        $label = $device->deviceInfo->label;
        if ($cmdLabel eq $label) {
            return $device;
        } elsif (($label =~ /^Hard disk/) and ($cmdLabel =~ /^d(.*)/)) {
            my $desc = $1;
            if ($desc =~ /(.*):(.*)/) {    #specific
                my $controller = $1;
                my $unit = $2;
                if ($device->unitNumber == $unit and $device->controllerKey == $controller) {
                    return $device;
                }
            } elsif ($desc =~ /\d+/ and $device->unitNumber == $desc) {    #not specific
                return $device;
            }
        }
    }
    return undef;
}

#takes a label for a hard disk and prepends "Hard disk " if it's not there already
sub commandLabel {
    my $label = shift;
    if (($label =~ /^Hard disk/) or ($label =~ /^d\d+/)) {
        return $label;
    }
    return "Hard disk " . $label;
}
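
# Usage sketch (illustrative): a disk can be addressed either by its VMware
# label ("Hard disk 2", or just "2", which commandLabel() expands), or by a
# "d<controllerKey>:<unitNumber>" form. The controller key 1000 below is only
# an example value, and $devices stands in for
# $vmview->config->hardware->device as used above.
#   my $bylabel = getDiskByLabel("2", $devices);          # matches "Hard disk 2"
#   my $byaddr  = getDiskByLabel("d1000:1", $devices);    # matches controller 1000, unit 1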

#this function will check pending task status
sub process_tasks {
    foreach (keys %running_tasks) {
        my $curcon;
        if (defined $running_tasks{$_}->{conn}) {
            $curcon = $running_tasks{$_}->{conn};
        } elsif ($running_tasks{$_}->{hyp}) {
            $curcon = $hyphash{ $running_tasks{$_}->{hyp} }->{conn};
        } elsif ($running_tasks{$_}->{vm}) {
            $curcon = $vmhash{ $running_tasks{$_}->{vm} }->{conn};
        } elsif ($running_tasks{$_}->{cluster}) {
            $curcon = $clusterhash{ $running_tasks{$_}->{cluster} }->{conn};
        } else {
            use Carp qw/confess/;
            confess "This stack trace indicates a cluster unfriendly path";
        }
        my $curt = $curcon->get_view(mo_ref => $running_tasks{$_}->{task});
        my $state = $curt->info->state->val;
        unless ($state eq 'running' or $state eq 'queued') {
            $running_tasks{$_}->{callback}->($curt, $running_tasks{$_}->{data});
            delete $running_tasks{$_};
        }
        if ($state eq 'running' and not $running_tasks{$_}->{questionasked}) {    # and $curt->info->progress == 95) { #This is unfortunate, there should be a 'state' to indicate a question is blocking
             #however there isn't, so if we see something running at 95%, we just manually see if a question blocked the rest
            my $vm;
            $@ = "";
            eval {
                $vm = $curcon->get_view(mo_ref => $curt->info->entity);
            };
            if ($@) { $vm = 0; }
            if ($vm and $vm->{summary} and $vm->summary->{runtime} and $vm->summary->runtime->{question} and $vm->summary->runtime->question) {
                $running_tasks{$_}->{questionasked} = 1;
                $running_tasks{$_}->{callback}->($curt, $running_tasks{$_}->{data}, $vm->summary->runtime->question, $vm);
            }
        }
    }
}

#this function is a barrier to ensure prerequisites are met
sub wait_for_tasks {
    while (scalar keys %running_tasks) {
        process_tasks;
        sleep(1);    #We'll check back in every second. Unfortunately, we have to poll since we are in web service land
    }
}
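
# Pattern sketch (mirrors how tasks are queued elsewhere in this plugin, e.g.
# chvm above): register an asynchronous vSphere task in %running_tasks with a
# completion callback plus a hyp/vm/cluster key so process_tasks() can find the
# right connection, then let wait_for_tasks() poll it to completion. The
# SomeVSphere_Task call and my_callback name are placeholders, not real API.
#   my $task = $vmview->SomeVSphere_Task(...);    # any *_Task method returning a task moref
#   $running_tasks{$task}->{task}     = $task;
#   $running_tasks{$task}->{callback} = \&my_callback;
#   $running_tasks{$task}->{hyp}      = $hyp;
#   $running_tasks{$task}->{data}     = { node => $node };
#   wait_for_tasks();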
sub connecthost_callback {
    my $task = shift;
    my $args = shift;
    my $hv = $args->{hostview};
    my $state = $task->info->state->val;
    if ($state eq "success") {
        $hypready{ $args->{hypname} } = 1;    #declare readiness
        enable_vmotion(hypname => $args->{hypname}, hostview => $args->{hostview}, conn => $args->{conn});
        $vcenterhash{ $args->{vcenter} }->{goodhyps}->{ $args->{hypname} } = 1;
        if (defined $args->{depfun}) {    #If a function is waiting for the host connect to go valid, call it
            $args->{depfun}->($args->{depargs});
        }
        return;
    }
    my $thumbprint;
    eval {
        $thumbprint = $task->{info}->error->fault->thumbprint;
    };
    if ($thumbprint) {    #was an unknown certificate error, retry and accept the unknown certificate
        $args->{connspec}->{sslThumbprint} = $task->info->error->fault->thumbprint;
        my $task;
        if (defined $args->{hostview}) {    #It was a reconnect request
            $task = $hv->ReconnectHost_Task(cnxSpec => $args->{connspec});
        } elsif (defined $args->{foldview}) {    #was an add host request
            $task = $args->{foldview}->AddStandaloneHost_Task(spec => $args->{connspec}, addConnected => 1);
        } elsif (defined $args->{cluster}) {    #was an add host to cluster request
            $task = $args->{cluster}->AddHost_Task(spec => $args->{connspec}, asConnected => 1);
        }
        $running_tasks{$task}->{task} = $task;
        $running_tasks{$task}->{callback} = \&connecthost_callback;
        $running_tasks{$task}->{conn} = $args->{conn};
        $running_tasks{$task}->{data} = $args;    #{ conn_spec=>$connspec,hostview=>$hv,hypname=>$args->{hypname},vcenter=>$args->{vcenter} };
    } elsif ($state eq 'error') {
        my $error = $task->info->error->localizedMessage;
        if (defined($task->info->error->fault->faultMessage)) {    #Only in 4.0, support of 3.5 must be careful?
            foreach (@{ $task->info->error->fault->faultMessage }) {
                $error .= $_->message;
            }
        }
        xCAT::SvrUtils::sendmsg([ 1, $error ], $output_handler);    #,$node);
        $hypready{ $args->{hypname} } = -1;    #Impossible for this hypervisor to ever be ready
        $vcenterhash{ $args->{vcenter} }->{badhyps}->{ $args->{hypname} } = 1;
    }
}

sub delhost_callback {    #only called in rmhypervisor -f case during validate vcenter phase
    my $task = shift;
    my $args = shift;
    my $hv = $args->{hostview};
    my $state = $task->info->state->val;
    if ($state eq "success") {
        xCAT::SvrUtils::sendmsg("removed", $output_handler, $args->{hypname});
        $hypready{ $args->{hypname} } = -1;    #Impossible for this hypervisor to ever be ready
        $vcenterhash{ $args->{vcenter} }->{badhyps}->{ $args->{hypname} } = 1;
    } elsif ($state eq 'error') {
        my $error = $task->info->error->localizedMessage;
        if (defined($task->info->error->fault->faultMessage)) {    #Only in 4.0, support of 3.5 must be careful?
            foreach (@{ $task->info->error->fault->faultMessage }) {
                $error .= $_->message;
            }
        }
        xCAT::SvrUtils::sendmsg([ 1, $error ], $output_handler);    #,$node);
        $hypready{ $args->{hypname} } = -1;    #Impossible for this hypervisor to ever be ready
        $vcenterhash{ $args->{vcenter} }->{badhyps}->{ $args->{hypname} } = 1;
    }
}

sub get_clusterview {
    my %args = @_;
    my $clustname = $args{clustname};
    my %subargs = (
        view_type => 'ClusterComputeResource',
    );
    if ($args{properties}) {
        $subargs{properties} = $args{properties};
    }
    $subargs{filter} = { name => $clustname };
    my $view = $args{conn}->find_entity_view(%subargs);
    return $view;
    #foreach (@{$args{conn}->find_entity_views(%subargs)}) {
    #    if ($_->name eq "$clustname") {
    #        return $_;
    #        last;
    #    }
    #}
}

sub get_hostview {
    my %args = @_;
    my $host = $args{hypname};
    my %subargs = (
        view_type => 'HostSystem',
    );
    if ($args{properties}) {
        $subargs{properties} = $args{properties};
    }
    my @addrs = gethostbyname($host);
    my $ip;
    my $name;
    my $aliases;
    if ($addrs[4]) {
        $ip = inet_ntoa($addrs[4]);
        ($name, $aliases) = gethostbyaddr($addrs[4], AF_INET);    #TODO: IPv6
    } else {
        ($ip, $name, $aliases) = ($host, $host, "");
    }
    my @matchvalues = ($host, $ip, $name);
    foreach (split /\s+/, $aliases) {
        push @matchvalues, $_;
    }
    my $view;
    $subargs{filter} = { 'name' => qr/$host(?:\.|\z)/ };
    $view = $args{conn}->find_entity_view(%subargs);
    if ($view) { return $view; }
    foreach (@matchvalues) {
        $subargs{filter} = { 'name' => qr/$_(?:\.|\z)/ };
        $view = $args{conn}->find_entity_view(%subargs);
        if ($view) { return $view; }
    }
    $subargs{filter} = { 'name' => qr/localhost(?:\.|\z)/ };
    $view = $args{conn}->find_entity_view(%subargs);
    if ($view) { return $view; }
    return undef;    #rest of function should be obsoleted, going to run with that assumption for 2.5 at least

    # $subargs{filter}={'name' =~ qr/.*/};
    # foreach (@{$args{conn}->find_entity_views(%subargs)}) {
    #    my $view = $_;
    #    if ($_->name =~ /$host(?:\.|\z)/ or $_->name =~ /localhost(?:\.|\z)/ or grep { $view->name =~ /$_(?:\.|\z)/ } @matchvalues) {
    #        return $view;
    #        last;
    #    }
    # }
}
sub enable_vmotion {
    #TODO: vmware 3.x semantics too? this is 4.0...
    my %args = @_;
    unless ($args{hostview}) {
        $args{hostview} = get_hostview(conn => $args{conn}, hypname => $args{hypname}, properties => [ 'configManager', 'name' ]);
    }
    my $nicmgr = $args{conn}->get_view(mo_ref => $args{hostview}->configManager->virtualNicManager);
    my $qnc = $nicmgr->QueryNetConfig(nicType => "vmotion");
    if ($qnc->{selectedVnic}) {
        return 1;
    } else {
        my $vniccount = scalar @{ $qnc->candidateVnic };
        if ($vniccount == 1 or ($vniccount == 2 and $qnc->candidateVnic->[1]->spec->ip->ipAddress =~ /^169.254/)) {    #There is only one possible path, use it
            $nicmgr->SelectVnicForNicType(nicType => "vmotion", device => $qnc->candidateVnic->[0]->device);
            return 1;
        } else {
            xCAT::SvrUtils::sendmsg([ 1, "TODO: use configuration to pick the nic " . $args{hypname} ], $output_handler);
        }
        return 0;
    }
}
sub mkvm_callback {
    my $task = shift;
    my $args = shift;
    my $node = $args->{node};
    my $hyp = $args->{hyp};
    if ($task->info->state->val eq 'error') {
        my $error = $task->info->error->localizedMessage;
        xCAT::SvrUtils::sendmsg([1, $error], $output_handler, $node);
    }
}
sub relay_vmware_err {
    my $task = shift;
    my $extratext = shift;
    my @nodes = @_;
    my $error = $task->info->error->localizedMessage;
    if (defined($task->info->error->fault->faultMessage)) { #faultMessage only present in 4.0; 3.5 support must be careful here
        foreach (@{ $task->info->error->fault->faultMessage }) {
            $error .= $_->message;
        }
    }
    if (@nodes) {
        foreach (@nodes) {
            xCAT::SvrUtils::sendmsg([1, $extratext . $error], $output_handler, $_);
        }
    } else {
        xCAT::SvrUtils::sendmsg([1, $extratext . $error], $output_handler);
    }
}
2010-06-08 18:14:04 +00:00
sub relocate_callback {
    my $task = shift;
    my $parms = shift;
    my $state = $task->info->state->val;
    if ($state eq 'success') {
        my $vmtab = xCAT::Table->new('vm'); #TODO: update vm.storage?
        my $prevloc = $tablecfg{vm}->{ $parms->{node} }->[0]->{storage};
        my $model;
        ($prevloc, $model) = split /=/, $prevloc;
        my $target = $parms->{target};
        if ($model) {
            $target .= "=$model";
        }
        $vmtab->setNodeAttribs($parms->{node}, { storage => $target });
        xCAT::SvrUtils::sendmsg(":relocated to " . $parms->{target}, $output_handler, $parms->{node});
    } else {
        relay_vmware_err($task, "Relocating to " . $parms->{target} . " ", $parms->{node});
    }
}

sub migrate_ok { #make it look like a successful migrate; callback for registering a vm
    my %args = @_;
    my $vmtab = xCAT::Table->new('vm');
    $vmtab->setNodeAttribs($args{nodes}->[0], { host => $args{target} });
    xCAT::SvrUtils::sendmsg("migrated to " . $args{target}, $output_handler, $args{nodes}->[0]);
}
2009-06-22 16:00:28 +00:00
sub migrate_callback {
my $ task = shift ;
my $ parms = shift ;
my $ state = $ task - > info - > state - > val ;
2010-06-12 01:57:12 +00:00
if ( not $ parms - > { skiptodeadsource } and $ state eq 'success' ) {
2009-06-22 16:00:28 +00:00
my $ vmtab = xCAT::Table - > new ( 'vm' ) ;
$ vmtab - > setNodeAttribs ( $ parms - > { node } , { host = > $ parms - > { target } } ) ;
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( "migrated to " . $ parms - > { target } , $ output_handler , $ parms - > { node } ) ;
2010-06-12 01:57:12 +00:00
} elsif ( $ parms - > { offline } ) { #try a forceful RegisterVM instead
2010-06-14 17:40:06 +00:00
my $ target = $ parms - > { target } ;
my $ hostview = $ hyphash { $ target } - > { conn } - > find_entity_view ( view_type = > 'VirtualMachine' , properties = > [ 'config.name' ] , filter = > { name = > $ parms - > { node } } ) ;
if ( $ hostview ) { #this means vcenter still has it in inventory, but on a dead node...
#unfortunately, vcenter won't give up the old one until we zap the dead hypervisor
#also unfortunately, it doesn't make it easy to find said hypervisor..
$ hostview = $ hyphash { $ parms - > { src } } - > { conn } - > get_view ( mo_ref = > $ hyphash { $ parms - > { src } } - > { deletionref } ) ;
$ task = $ hostview - > Destroy_Task ( ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & migrate_callback ;
$ running_tasks { $ task } - > { conn } = $ hyphash { $ target } - > { vcenter } - > { conn } ;
2010-06-14 18:49:25 +00:00
$ running_tasks { $ task } - > { data } = { offline = > 1 , src = > $ parms - > { src } , node = > $ parms - > { node } , target = > $ target , skiptodeadsource = > 1 } ;
2010-06-12 01:57:12 +00:00
} else { #it is completely gone, attempt a register_vm strategy
2010-06-14 18:49:25 +00:00
register_vm ( $ target , $ parms - > { node } , undef , \ & migrate_ok , { nodes = > [ $ parms - > { node } ] , target = > $ target , } , "failonerror" ) ;
2010-06-12 01:57:12 +00:00
}
2009-06-22 16:00:28 +00:00
} else {
relay_vmware_err ( $ task , "Migrating to " . $ parms - > { target } . " " , $ parms - > { node } ) ;
}
}
2010-03-19 20:03:48 +00:00
sub poweron_task_callback {
    my $task = shift;
    my $parms = shift;
    my $q = shift;  #question, if the task is blocked on one
    my $vm = shift; #vm view used to answer the question if one is pending
    my $state = $task->info->state->val;
    my $node = $parms->{node};
    my $intent = $parms->{successtext};
    if ($state eq 'success') {
        xCAT::SvrUtils::sendmsg($intent, $output_handler, $node);
    } elsif ($state eq 'error') {
        relay_vmware_err($task, "", $node);
    } elsif ($q and $q->text =~ /^msg.uuid.altered:/ and ($q->choice->choiceInfo->[0]->summary eq 'Cancel' and ($q->choice->choiceInfo->[0]->key eq '0'))) { #make sure it is what we have seen it to be
        if ($parms->{forceon} and $q->choice->choiceInfo->[1]->summary =~ /I (_)?moved it/ and $q->choice->choiceInfo->[1]->key eq '1') { #answer the question as 'moved'
            $vm->AnswerVM(questionId => $q->id, answerChoice => '1');
        } else {
            $vm->AnswerVM(questionId => $q->id, answerChoice => '0');
            xCAT::SvrUtils::sendmsg([1, "Failed to power on VM because it mismatched against the hypervisor; if you are positive the VM is not running on another hypervisor, use -f to force it on"], $output_handler, $node);
        }
    } elsif ($q) {
        if ($q->choice->choiceInfo->[0]->summary eq 'Cancel') {
            xCAT::SvrUtils::sendmsg([1, ":Cancelling due to an unexpected question while executing task: " . $q->text], $output_handler, $node);
        } else {
            xCAT::SvrUtils::sendmsg([1, ":Task hung due to an unexpected question; the VMware tools are needed to clean up the mess for now: " . $q->text], $output_handler, $node);
        }
    }
}
2010-09-17 14:07:23 +00:00
sub chvm_task_callback {
my $ task = shift ;
my $ parms = shift ;
my $ state = $ task - > info - > state - > val ;
my $ node = $ parms - > { node } ;
my $ intent = $ parms - > { successtext } ;
if ( $ state eq 'success' ) {
my $ updatehash ;
if ( $ parms - > { cpus } and $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { cpus } ) { #need to update
$ updatehash - > { cpus } = $ parms - > { cpus } ;
}
if ( $ parms - > { mem } and $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { memory } ) { #need to update
$ updatehash - > { memory } = $ parms - > { mem } ;
}
if ( $ updatehash ) {
my $ vmtab = xCAT::Table - > new ( 'vm' , - create = > 1 ) ;
$ vmtab - > setNodeAttribs ( $ node , $ updatehash ) ;
}
xCAT::SvrUtils:: sendmsg ( $ intent , $ output_handler , $ node ) ;
} elsif ( $ state eq 'error' ) {
relay_vmware_err ( $ task , "" , $ node ) ;
}
}
2009-06-22 16:00:28 +00:00
sub generic_task_callback {
    my $task = shift;
    my $parms = shift;
    my $state = $task->info->state->val;
    my $node = $parms->{node};
    my $intent = $parms->{successtext};
    if ($state eq 'success') {
        xCAT::SvrUtils::sendmsg($intent, $output_handler, $node);
    } elsif ($state eq 'error') {
        relay_vmware_err($task, "", $node);
    }
}
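# Minimal sketch of the task-tracking convention these callbacks assume: every
# asynchronous VMware task is recorded in %running_tasks with a callback and
# whatever data that callback will need, and process_tasks later fires the
# callback with the finished task plus that data. $sometask, $somehyp and
# $somenode are placeholders for illustration only.
#   $running_tasks{$sometask}->{task}     = $sometask;
#   $running_tasks{$sometask}->{callback} = \&generic_task_callback;
#   $running_tasks{$sometask}->{hyp}      = $somehyp;
#   $running_tasks{$sometask}->{data}     = { node => $somenode, successtext => 'done' };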
2010-06-08 18:14:04 +00:00
sub migrate {
my % args = @ _ ;
2009-06-22 16:00:28 +00:00
my @ nodes = @ { $ args { nodes } } ;
my $ hyp = $ args { hyp } ;
2010-06-08 18:14:04 +00:00
my $ vcenter = $ hyphash { $ hyp } - > { vcenter } - > { name } ;
my $ datastoredest ;
2010-06-12 01:57:12 +00:00
my $ offline ;
2010-06-08 18:14:04 +00:00
@ ARGV = @ { $ args { exargs } } ;
unless ( GetOptions (
's=s' = > \ $ datastoredest ,
2010-06-12 01:57:12 +00:00
'f' = > \ $ offline ,
2010-06-08 18:14:04 +00:00
) ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Error parsing arguments" ] , $ output_handler ) ;
2010-06-08 18:14:04 +00:00
return ;
}
my $ target = $ hyp ; #case for storage migration
2010-11-08 15:24:14 +00:00
if ( $ datastoredest ) { $ datastoredest =~ s/=.*// ; } #remove =scsi and similar if specified
2010-06-08 18:14:04 +00:00
if ( $ datastoredest and scalar @ ARGV ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to mix storage migration and processing of arguments " . join ( ' ' , @ ARGV ) ] , $ output_handler ) ;
2010-06-08 18:14:04 +00:00
return ;
} elsif ( @ ARGV ) {
$ target = shift @ ARGV ;
if ( @ ARGV ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unrecognized arguments " . join ( ' ' , @ ARGV ) ] , $ output_handler ) ;
2010-06-08 18:14:04 +00:00
return ;
}
} elsif ( $ datastoredest ) { #storage migration only
unless ( validate_datastore_prereqs ( [] , $ hyp , { $ datastoredest = > \ @ nodes } ) ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to find/mount target datastore $datastoredest" ] , $ output_handler ) ;
2010-06-08 18:14:04 +00:00
return ;
}
foreach ( @ nodes ) {
my $ hostview = $ hyphash { $ hyp } - > { conn } - > find_entity_view ( view_type = > 'VirtualMachine' , properties = > [ 'config.name' ] , filter = > { name = > $ _ } ) ;
my $ relocatspec = VirtualMachineRelocateSpec - > new (
datastore = > $ hyphash { $ hyp } - > { datastorerefmap } - > { $ datastoredest } ,
) ;
my $ task = $ hostview - > RelocateVM_Task ( spec = > $ relocatspec ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & relocate_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ _ , target = > $ datastoredest } ;
2010-07-08 15:19:18 +00:00
process_tasks; #check for tasks needing followup actions before the task is forgotten (VMware's memory is fairly short at times)
2010-06-08 18:14:04 +00:00
}
return ;
}
2010-08-09 15:12:05 +00:00
if ( ( not $ offline and $ vcenterhash { $ vcenter } - > { badhyps } - > { $ hyp } ) or $ vcenterhash { $ vcenter } - > { badhyps } - > { $ target } ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to migrate " . join ( ',' , @ nodes ) . " to $target due to inability to validate vCenter connectivity" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
return ;
}
2010-08-09 15:12:05 +00:00
if ( ( $ offline or $ vcenterhash { $ vcenter } - > { goodhyps } - > { $ hyp } ) and $ vcenterhash { $ vcenter } - > { goodhyps } - > { $ target } ) {
2009-06-22 16:00:28 +00:00
unless ( validate_datastore_prereqs ( \ @ nodes , $ target ) ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to verify storage state on target system" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
return ;
}
unless ( validate_network_prereqs ( \ @ nodes , $ target ) ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to verify target network state" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
return ;
}
2009-07-14 20:43:59 +00:00
my $ dstview = get_hostview ( conn = > $ hyphash { $ target } - > { conn } , hypname = > $ target , properties = > [ 'name' , 'parent' ] ) ;
2009-06-22 16:00:28 +00:00
unless ( $ hyphash { $ target } - > { pool } ) {
$ hyphash { $ target } - > { pool } = $ hyphash { $ target } - > { conn } - > get_view ( mo_ref = > $ dstview - > parent , properties = > [ 'resourcePool' ] ) - > resourcePool ;
}
foreach ( @ nodes ) {
2010-07-08 15:19:18 +00:00
process_tasks; #check for tasks needing followup actions before the task is forgotten (VMware's memory is fairly short at times)
2010-06-12 01:57:12 +00:00
my $ srcview = $ hyphash { $ target } - > { conn } - > find_entity_view ( view_type = > 'VirtualMachine' , properties = > [ 'config.name' ] , filter = > { name = > $ _ } ) ;
if ( $ offline and not $ srcview ) { #we have a request to resurrect the dead..
register_vm ( $ target , $ _ , undef , \ & migrate_ok , { nodes = > [ $ _ ] , exargs = > $ args { exargs } , target = > $ target , hyp = > $ args { hyp } , offline = > $ offline } , "failonerror" ) ;
return ;
} elsif ( not $ srcview ) {
$ srcview = $ hyphash { $ hyp } - > { conn } - > find_entity_view ( view_type = > 'VirtualMachine' , properties = > [ 'config.name' ] , filter = > { name = > $ _ } ) ;
}
unless ( $ srcview ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to locate node in vCenter" ] , $ output_handler , $ _ ) ;
2010-06-12 01:57:12 +00:00
next ;
}
2009-06-22 16:00:28 +00:00
my $ task = $ srcview - > MigrateVM_Task (
host = > $ dstview ,
pool = > $ hyphash { $ target } - > { pool } ,
priority = > VirtualMachineMovePriority - > new ( 'highPriority' ) ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & migrate_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
2010-06-12 01:57:12 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ _ , src = > $ hyp , target = > $ target , offline = > $ offline } ;
2009-06-22 16:00:28 +00:00
}
} else {
2010-08-06 15:29:07 +00:00
#xCAT::SvrUtils::sendmsg("Waiting for BOTH to be 'good'");
2009-06-22 16:00:28 +00:00
return ; #One of them is still 'pending'
}
}
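# Hedged usage sketch for the sub above: migrate() is normally reached through
# generic_hyp_operation with the rmigrate arguments as exargs, so storage-only
# relocation versus host migration is decided purely by the flags. The node,
# hypervisor and datastore names below are made up for illustration.
#   migrate(nodes => ['vm1'], hyp => 'esxi01', exargs => ['-s', 'nfs://nas/vms']);  # storage relocation only
#   migrate(nodes => ['vm1'], hyp => 'esxi01', exargs => ['esxi02']);               # move to another hypervisor
#   migrate(nodes => ['vm1'], hyp => 'esxi01', exargs => ['esxi02', '-f']);         # source is dead, force the move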
sub reconfig_callback {
    my $task = shift;
    my $args = shift;
    #$args->{reconfig_args}->{vmview}->update_view_data();
    delete $args->{reconfig_args}->{vmview}; #Force a reload of the view, update_view_data seems to not work as advertised..
    $args->{reconfig_fun}->(%{ $args->{reconfig_args} });
}

sub repower { #Called to try power again after power down for reconfig
    my $task = shift;
    my $args = shift;
    my $powargs = $args->{power_args};
    $powargs->{pretendop} = 1;
    #$powargs->{vmview}->update_view_data();
    delete $powargs->{vmview}; #Force a reload of the view, update_view_data seems to not work as advertised..
    power(%$powargs);
}

sub retry_rmvm {
    my $task = shift;
    my $args = shift;
    my $node = $args->{node};
    my $state = $task->info->state->val;
    if ($state eq "success") {
        #$Data::Dumper::Maxdepth=2;
        delete $args->{args}->{vmview};
        rmvm(%{ $args->{args} });
    } elsif ($state eq 'error') {
        relay_vmware_err($task, "", $node);
    }
}
sub rmvm {
my % args = @ _ ;
my $ node = $ args { node } ;
my $ hyp = $ args { hyp } ;
if ( not defined $ args { vmview } ) { #attempt one refresh
2010-09-17 21:10:58 +00:00
$ args { vmview } = $ vmhash { $ node } - > { conn } - > find_entity_view ( view_type = > 'VirtualMachine' , properties = > [ 'config.name' , 'runtime.powerState' ] , filter = > { name = > $ node } ) ;
2010-07-29 20:03:05 +00:00
if ( not defined $ args { vmview } ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "VM does not appear to exist" ] , $ output_handler , $ node ) ;
2010-07-29 20:03:05 +00:00
return ;
}
2009-06-22 16:00:28 +00:00
}
@ ARGV = @ { $ args { exargs } } ;
require Getopt::Long ;
my $ forceremove ;
my $ purge ;
GetOptions (
'f' = > \ $ forceremove ,
'p' = > \ $ purge ,
) ;
my $ task ;
unless ( $ args { vmview } - > { 'runtime.powerState' } - > val eq 'poweredOff' ) {
if ( $ forceremove ) {
$ task = $ args { vmview } - > PowerOffVM_Task ( ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & retry_rmvm ,
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
2010-09-17 21:10:58 +00:00
$ running_tasks { $ task } - > { vm } = $ node ;
2009-06-22 16:00:28 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , args = > \ % args } ;
return ;
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Cannot rmvm active guest (use -f argument to force)" ] , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
return ;
}
}
if ( $ purge ) {
$ task = $ args { vmview } - > Destroy_Task ( ) ;
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > 'purged' } ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ; #$hyp_conns->{$hyp};
2010-09-17 21:10:58 +00:00
$ running_tasks { $ task } - > { vm } = $ node ;
2009-06-22 16:00:28 +00:00
} else {
$ task = $ args { vmview } - > UnregisterVM ( ) ;
}
}
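# Hedged usage sketch: rmvm() honors two flags from exargs, and the retry_rmvm
# callback above re-enters it after a forced power-off. Names are illustrative.
#   rmvm(node => 'vm1', hyp => 'esxi01', exargs => []);            # refuses if the guest is running
#   rmvm(node => 'vm1', hyp => 'esxi01', exargs => ['-f']);        # power off first, then unregister
#   rmvm(node => 'vm1', hyp => 'esxi01', exargs => ['-f', '-p']);  # power off, then destroy (purge) the VM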
sub getreconfigspec {
    my %args = @_;
    my $node = $args{node};
    my $vmview = $args{view};
    my $currid = $args{view}->{'config.guestId'};
    my $rightid = getguestid($node);
    my %conargs;
    my $reconfigneeded = 0;
    if ($currid ne $rightid) {
        $reconfigneeded = 1;
        $conargs{guestId} = $rightid;
    }
    my $newmem;
    if ($tablecfg{vm}->{$node}->[0]->{memory} and $newmem = getUnits($tablecfg{vm}->{$node}->[0]->{memory}, "M", 1048576)) {
        my $currmem = $vmview->{'config.hardware.memoryMB'};
        if ($newmem ne $currmem) {
            $conargs{memoryMB} = $newmem;
            $reconfigneeded = 1;
        }
    }
    my $newcpus;
    if ($tablecfg{vm}->{$node}->[0]->{cpus} and $newcpus = $tablecfg{vm}->{$node}->[0]->{cpus}) {
        my $currncpu = $vmview->{'config.hardware.numCPU'};
        if ($newcpus ne $currncpu) {
            $conargs{numCPUs} = $newcpus;
            $reconfigneeded = 1;
        }
    }
    if ($reconfigneeded) {
        return VirtualMachineConfigSpec->new(%conargs);
    } else {
        return 0;
    }
}
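# Sketch of how the spec above is consumed (this mirrors what power() does just
# below): when the table definition disagrees with the live config, the returned
# VirtualMachineConfigSpec is handed to ReconfigVM_Task and the task is tracked
# like any other. $somenode and $vmview are placeholders for this sketch.
#   if (my $spec = getreconfigspec(node => $somenode, view => $vmview)) {
#       my $task = $vmview->ReconfigVM_Task(spec => $spec);
#       $running_tasks{$task}->{task}     = $task;
#       $running_tasks{$task}->{callback} = \&generic_task_callback;
#       $running_tasks{$task}->{data}     = { node => $somenode, successtext => 'reconfigured' };
#   }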
#This routine takes a single node, its managing vm view instance, and the task tracking hash to submit a power on request
sub power {
my % args = @ _ ;
my $ node = $ args { node } ;
my $ hyp = $ args { hyp } ;
my $ pretendop = $ args { pretendop } ; #to pretend a system was on for reset or boot when we have to turn it off internally for reconfig
if ( not defined $ args { vmview } ) { #attempt one refresh
2010-09-17 21:10:58 +00:00
$ args { vmview } = $ vmhash { $ node } - > { conn } - > find_entity_view ( view_type = > 'VirtualMachine' , properties = > [ 'config.name' , 'config.guestId' , 'config.hardware.memoryMB' , 'config.hardware.numCPU' , 'runtime.powerState' ] , filter = > { name = > $ node } ) ;
2010-08-10 18:22:21 +00:00
#vmview not existing now is not an issue, this function
#is designed to handle that and correct if reasonably possible
#comes into play particularly in a stateless context
2009-06-22 16:00:28 +00:00
}
2010-03-19 20:03:48 +00:00
@ ARGV = @ { $ args { exargs } } ; #for getoptions;
my $ forceon ;
require Getopt::Long ;
GetOptions (
'force|f' = > \ $ forceon ,
) ;
my $ subcmd = $ ARGV [ 0 ] ;
2009-06-22 16:00:28 +00:00
my $ intent = "" ;
my $ task ;
my $ currstat ;
if ( $ args { vmview } ) {
$ currstat = $ args { vmview } - > { 'runtime.powerState' } - > val ;
if ( grep /$subcmd/ , qw/on reset boot/ ) {
my $ reconfigspec ;
2010-11-09 19:01:33 +00:00
if ( $ reconfigreset and ( $ reconfigspec = getreconfigspec ( node = > $ node , view = > $ args { vmview } ) ) ) {
2009-06-22 16:00:28 +00:00
if ( $ currstat eq 'poweredOff' ) {
2010-08-06 15:29:07 +00:00
#xCAT::SvrUtils::sendmsg("Correcting guestId because $currid and $rightid are not the same...");#DEBUG
2009-06-22 16:00:28 +00:00
my $ task = $ args { vmview } - > ReconfigVM_Task ( spec = > $ reconfigspec ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & reconfig_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
2010-09-17 21:10:58 +00:00
$ running_tasks { $ task } - > { vm } = $ node ;
2009-06-22 16:00:28 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , reconfig_fun = > \ & power , reconfig_args = > \ % args } ;
return ;
} elsif ( grep /$subcmd/ , qw/reset boot/ ) { #going to have to do a 'cycle' and present it up normally..
2010-08-06 15:29:07 +00:00
#xCAT::SvrUtils::sendmsg("DEBUG: forcing a cycle");
2009-06-22 16:00:28 +00:00
$ task = $ args { vmview } - > PowerOffVM_Task ( ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & repower ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
2010-09-17 21:10:58 +00:00
$ running_tasks { $ task } - > { vm } = $ node ;
2009-06-22 16:00:28 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , power_args = > \ % args } ;
return ; #we have to wait
}
#TODO: fixit
2010-08-06 15:29:07 +00:00
#xCAT::SvrUtils::sendmsg("I see vm has $currid and I want it to be $rightid");
2009-06-22 16:00:28 +00:00
}
}
} else {
$ currstat = 'off' ;
}
if ( $ currstat eq 'poweredOff' ) {
$ currstat = 'off' ;
} elsif ( $ currstat eq 'poweredOn' ) {
$ currstat = 'on' ;
} elsif ( $ currstat eq 'suspended' ) {
$ currstat = 'suspend' ;
}
if ( $ subcmd =~ /^stat/ ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( $ currstat , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
return ;
}
if ( $ subcmd =~ /boot/ ) {
$ intent = "$currstat " ;
if ( $ currstat eq 'on' or $ args { pretendop } ) {
$ intent = "on " ;
$ subcmd = 'reset' ;
} else {
$ subcmd = 'on' ;
}
}
if ( $ subcmd =~ /on/ ) {
2010-03-26 12:46:05 +00:00
if ( $ currstat eq 'off' or $ currstat eq 'suspend' ) {
2009-06-22 16:00:28 +00:00
if ( not $ args { vmview } ) { #We are asking to turn on a system the hypervisor
#doesn't know, attempt to register it first
register_vm ( $ hyp , $ node , undef , \ & power , \ % args ) ;
return ; #We'll pick it up on the retry if it gets registered
}
eval {
2010-09-17 21:10:58 +00:00
if ( $ hyp ) {
$ task = $ args { vmview } - > PowerOnVM_Task ( host = > $ hyphash { $ hyp } - > { hostview } ) ;
} else {
$task = $args{vmview}->PowerOnVM_Task(); #DRS may have its way with me
}
2009-06-22 16:00:28 +00:00
} ;
if ( $@ ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ":" . $@ ] , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
return ;
}
$ running_tasks { $ task } - > { task } = $ task ;
2010-03-19 20:03:48 +00:00
$ running_tasks { $ task } - > { callback } = \ & poweron_task_callback ;
2009-06-22 16:00:28 +00:00
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ; #$hyp_conns->{$hyp};
2010-09-17 21:10:58 +00:00
$ running_tasks { $ task } - > { vm } = $ node ;
2010-03-19 20:03:48 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > $ intent . 'on' , forceon = > $ forceon } ;
2009-06-22 16:00:28 +00:00
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( $ currstat , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
}
2010-09-16 20:41:23 +00:00
} elsif ( $ subcmd =~ /softoff/ ) {
if ( $ currstat eq 'on' ) {
$ args { vmview } - > ShutdownGuest ( ) ;
xCAT::SvrUtils:: sendmsg ( "softoff" , $ output_handler , $ node ) ;
} else {
xCAT::SvrUtils:: sendmsg ( $ currstat , $ output_handler , $ node ) ;
}
2009-06-22 16:00:28 +00:00
} elsif ( $ subcmd =~ /off/ ) {
if ( $ currstat eq 'on' ) {
$ task = $ args { vmview } - > PowerOffVM_Task ( ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
2010-09-17 21:10:58 +00:00
$ running_tasks { $ task } - > { vm } = $ node ;
2009-06-22 16:00:28 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > 'off' } ;
2010-03-26 12:46:05 +00:00
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( $ currstat , $ output_handler , $ node ) ;
2010-03-26 12:46:05 +00:00
}
} elsif ( $ subcmd =~ /suspend/ ) {
if ( $ currstat eq 'on' ) {
$ task = $ args { vmview } - > SuspendVM_Task ( ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
2010-09-17 21:10:58 +00:00
$ running_tasks { $ task } - > { vm } = $ node ;
2010-03-26 12:46:05 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > 'suspend' } ;
2009-06-22 16:00:28 +00:00
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( "off" , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
}
} elsif ( $ subcmd =~ /reset/ ) {
if ( $ currstat eq 'on' ) {
$ task = $ args { vmview } - > ResetVM_Task ( ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
2010-09-17 21:10:58 +00:00
$ running_tasks { $ task } - > { vm } = $ node ;
2009-06-22 16:00:28 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > $ intent . 'reset' } ;
} elsif ( $ args { pretendop } ) { #It is off, but pretend it was on
eval {
2010-09-17 21:10:58 +00:00
if ( $ hyp ) {
$ task = $ args { vmview } - > PowerOnVM_Task ( host = > $ hyphash { $ hyp } - > { hostview } ) ;
} else {
$ task = $ args { vmview } - > PowerOnVM_Task ( ) ; #allow DRS
}
2009-06-22 16:00:28 +00:00
} ;
if ( $@ ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ":" . $@ ] , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
return ;
}
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
2010-09-17 21:10:58 +00:00
$ running_tasks { $ task } - > { vm } = $ node ;
2009-06-22 16:00:28 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > $ intent . 'reset' } ;
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( $ currstat , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
}
}
}
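# Hedged usage sketch for power(): the first bare word in exargs selects the
# action (stat/on/off/softoff/reset/boot/suspend), and -f|--force answers the
# "msg.uuid.altered" question as "I moved it". Names are illustrative only.
#   power(node => 'vm1', hyp => 'esxi01', exargs => ['stat']);
#   power(node => 'vm1', hyp => 'esxi01', exargs => ['boot']);
#   power(node => 'vm1', hyp => 'esxi01', exargs => ['on', '-f']);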
sub generic_vm_operation { #The general form of firing per-vm requests to ESX hypervisor
my $ properties = shift ; #The relevant properties to the general task, MUST INCLUDE config.name
my $ function = shift ; #The function to actually run against the right VM view
my @ exargs = @ _ ; #Store the rest to pass on
2009-10-21 16:39:17 +00:00
my $ hyp ;
my $ vmviews ;
my % vcviews ; #views populated once per vcenter server for improved performance
2010-08-09 20:46:36 +00:00
my $ node ;
2010-02-24 21:22:40 +00:00
foreach $ hyp ( keys % hyphash ) {
if ( $ viavcenterbyhyp - > { $ hyp } ) {
2010-09-17 21:10:58 +00:00
foreach $ node ( keys % { $ hyphash { $ hyp } - > { nodes } } ) {
2010-08-09 20:46:36 +00:00
$ vcenterhash { $ hyphash { $ hyp } - > { vcenter } - > { name } } - > { vms } - > { $ node } = 1 ;
2010-07-08 14:22:33 +00:00
}
2010-08-09 20:46:36 +00:00
}
}
2012-06-23 13:45:52 +00:00
foreach ( keys % limbonodes ) {
$ vcenterhash { $ limbonodes { $ _ } } - > { vms } - > { $ _ } = 1 ;
}
2010-09-17 21:10:58 +00:00
my $ cluster ;
foreach $ cluster ( keys % clusterhash ) {
foreach $ node ( keys % { $ clusterhash { $ cluster } - > { nodes } } ) {
$ vcenterhash { $ clusterhash { $ cluster } - > { vcenter } - > { name } } - > { vms } - > { $ node } = 1 ;
}
}
2010-08-09 20:46:36 +00:00
my $ currentvcenter ;
2012-06-23 13:45:52 +00:00
my % foundlimbo ;
2010-08-09 20:46:36 +00:00
foreach $ currentvcenter ( keys % vcenterhash ) {
#retrieve all vm views in one gulp
my $ vmsearchstring = join ( ")|(" , keys % { $ vcenterhash { $ currentvcenter } - > { vms } } ) ;
$ vmsearchstring = '^((' . $ vmsearchstring . '))(\z|\.)' ;
2010-11-19 20:30:56 +00:00
my $ regex = qr/$vmsearchstring/ ;
2010-08-09 20:46:36 +00:00
$ vcviews { $ currentvcenter } = $ vcenterhash { $ currentvcenter } - > { conn } - > find_entity_views ( view_type = > 'VirtualMachine' , properties = > $ properties , filter = > { 'config.name' = > $ regex } ) ;
2012-06-23 13:45:52 +00:00
foreach ( @ { $ vcviews { $ currentvcenter } } ) {
2009-10-21 16:39:17 +00:00
my $ node = $ _ - > { 'config.name' } ;
unless ( defined $ tablecfg { vm } - > { $ node } ) {
$ node =~ s/\..*// ; #try the short name;
}
if ( defined $ tablecfg { vm } - > { $ node } ) { #see if the host pointer requires a refresh
2011-02-25 23:02:08 +00:00
my $ hostref = $ hostrefbynode { $ node } ;
2012-06-23 13:45:52 +00:00
if ($hostref and $hostref eq $_->{'runtime.host'}->value) { next; } #the actual host reference matches the one we got when populating hostviews based on what the table had to say
#TODO: does this mean it is buggy if we want to mkvm/rmigrate/etc when the current vm.host is wrong and the noderange doesn't have something on the right hostview, making us not get it in the mass request? Or is it just slower because it gets host views by hand?
2012-06-23 13:45:52 +00:00
my $ host = $ vcenterhash { $ currentvcenter } - > { conn } - > get_view ( mo_ref = > $ _ - > { 'runtime.host' } , properties = > [ 'summary.config.name' ] ) ;
2011-02-25 23:02:08 +00:00
$ host = $ host - > { 'summary.config.name' } ;
2009-10-21 16:39:17 +00:00
my $ shost = $ host ;
$ shost =~ s/\..*// ;
#time to figure out which of these is a node
my @ nodes = noderange ( "$host,$shost" ) ;
my $ vmtab = xCAT::Table - > new ( "vm" , - create = > 1 ) ;
unless ( $ vmtab ) {
die "Error opening vm table" ;
}
if ( $ nodes [ 0 ] ) {
2012-06-23 13:45:52 +00:00
if ( $ limbonodes { $ node } ) { $ foundlimbo { $ node } = $ currentvcenter ; }
2009-10-21 16:39:17 +00:00
$ vmtab - > setNodeAttribs ( $ node , { host = > $ nodes [ 0 ] } ) ;
2010-02-09 16:26:40 +00:00
} #else {
# $vmtab->setNodeAttribs($node,{host=>$host});
#}
2009-10-21 16:39:17 +00:00
}
}
}
2012-06-23 13:45:52 +00:00
foreach my $lnode (keys %foundlimbo) {
    $vmviews = $vcviews{ $foundlimbo{$lnode} };
    my %mgdvms; #sort into a hash for convenience
    foreach (@$vmviews) {
        $mgdvms{ $_->{'config.name'} } = $_;
    }
    $function->(
        node => $lnode,
        vm => $lnode,
        vmview => $mgdvms{$lnode},
        exargs => \@exargs
    );
}
2010-09-17 21:10:58 +00:00
my @ entitylist ;
push @ entitylist , keys % hyphash ;
push @ entitylist , keys % clusterhash ;
foreach my $ entity ( @ entitylist ) {
if ( $ hyphash { $ entity } ) {
$ hyp = $ entity ; #save some retyping...
if ( $ viavcenterbyhyp - > { $ hyp } ) {
$ vmviews = $ vcviews { $ hyphash { $ hyp } - > { vcenter } - > { name } }
} else {
$ vmviews = [] ;
my $ node ;
foreach $ node ( sort ( keys % { $ hyphash { $ hyp } - > { nodes } } ) ) {
push @ { $ vmviews } , $ hyphash { $ hyp } - > { conn } - > find_entity_view ( view_type = > 'VirtualMachine' , properties = > $ properties , filter = > { 'config.name' = > qr/^$node/ } ) ;
}
#$vmviews = $hyphash{$hyp}->{conn}->find_entity_views(view_type => 'VirtualMachine',properties=>$properties);
}
my % mgdvms ; #sort into a hash for convenience
foreach ( @$ vmviews ) {
$ mgdvms { $ _ - > { 'config.name' } } = $ _ ;
2010-07-08 14:22:33 +00:00
}
2010-09-17 21:10:58 +00:00
my $ node ;
foreach $ node ( sort ( keys % { $ hyphash { $ hyp } - > { nodes } } ) ) {
$ function - > (
node = > $ node ,
hyp = > $ hyp ,
vmview = > $ mgdvms { $ node } ,
exargs = > \ @ exargs
) ;
2010-07-08 15:19:18 +00:00
process_tasks; #check for tasks needing followup actions before the task is forgotten (VMware's memory is fairly short at times)
2009-06-22 16:00:28 +00:00
}
2010-09-17 21:10:58 +00:00
} else { #a cluster.
$ vmviews = $ vcviews { $ clusterhash { $ entity } - > { vcenter } - > { name } } ;
my % mgdvms ; #sort into a hash for convenience
foreach ( @$ vmviews ) {
$ mgdvms { $ _ - > { 'config.name' } } = $ _ ;
}
my $ node ;
foreach $ node ( sort ( keys % { $ clusterhash { $ entity } - > { nodes } } ) ) {
$ function - > (
node = > $ node ,
cluster = > $ entity ,
vm = > $ node ,
vmview = > $ mgdvms { $ node } ,
exargs = > \ @ exargs
) ;
}
}
2009-06-22 16:00:28 +00:00
}
}
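# Hedged sketch of how the dispatcher elsewhere in this plugin is expected to
# call generic_vm_operation: a property list (which must include config.name),
# a per-VM worker sub, and the remaining client arguments. The exact property
# list below is illustrative, not copied from the real dispatch table.
#   generic_vm_operation(
#       ['config.name', 'config.guestId', 'config.hardware.memoryMB',
#        'config.hardware.numCPU', 'runtime.powerState', 'runtime.host'],
#       \&power, @exargs);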
sub generic_hyp_operation { #The general form of firing per-hypervisor requests to ESX hypervisor
my $ function = shift ; #The function to actually run against the right VM view
my @ exargs = @ _ ; #Store the rest to pass on
my $ hyp ;
2011-03-11 21:00:31 +00:00
if ( scalar keys % limbonodes ) { #we are in forced migration with dead sources, try to register them
@ ARGV = @ exargs ;
my $ datastoredest ;
my $ offline ;
unless ( GetOptions (
's=s' = > \ $ datastoredest ,
'f' = > \ $ offline ,
) ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Error parsing arguments" ] , $ output_handler ) ;
return ;
}
if ( $ datastoredest ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Storage migration impossible with dead hypervisor, must be migrated to live hypervisor first" ] , $ output_handler ) ;
return ;
} elsif ( @ ARGV ) {
my $ target = shift @ ARGV ;
if ( @ ARGV ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unrecognized arguments " . join ( ' ' , @ ARGV ) ] , $ output_handler ) ;
return ;
}
foreach ( keys % limbonodes ) {
register_vm ( $ target , $ _ , undef , \ & migrate_ok , { nodes = > [ $ _ ] , target = > $ target , } , "failonerror" ) ;
}
} else { #no target hypervisor was given at all
xCAT::SvrUtils:: sendmsg ( [ 1 , "No target hypervisor specified" ] , $ output_handler ) ;
}
}
2009-06-22 16:00:28 +00:00
foreach $ hyp ( keys % hyphash ) {
2010-07-08 15:19:18 +00:00
process_tasks; #check for tasks needing followup actions before the task is forgotten (VMware's memory is fairly short at times)
2009-06-22 16:00:28 +00:00
my @ relevant_nodes = sort ( keys % { $ hyphash { $ hyp } - > { nodes } } ) ;
unless ( scalar @ relevant_nodes ) {
next ;
}
$ function - > (
nodes = > \ @ relevant_nodes ,
hyp = > $ hyp ,
exargs = > \ @ exargs
) ;
#my $vmviews = $hyp_conns->{$hyp}->find_entity_views(view_type => 'VirtualMachine',properties=>['runtime.powerState','config.name']);
#my %mgdvms; #sort into a hash for convenience
#foreach (@$vmviews) {
# $mgdvms{$_->{'config.name'}} = $_;
#}
#my $node;
#foreach $node (sort (keys %{$hyp_hash->{$hyp}->{nodes}})){
# $function->($node,$mgdvms{$node},$taskstotrack,$callback,@exargs);
#REMINDER FOR RINV TO COME
# foreach (@nothing) { #@{$mgdvms{$node}->config->hardware->device}) {
# if (defined $_->{macAddress}) {
# print "\nFound a mac: ".$_->macAddress."\n";
# }
# }
# }
}
2010-09-17 21:10:58 +00:00
foreach $ hyp ( keys % clusterhash ) { #clonevm, mkvm, rmigrate could land here in clustered mode with DRS/HA
process_tasks ;
my @ relevant_nodes = sort ( keys % { $ clusterhash { $ hyp } - > { nodes } } ) ;
unless ( scalar @ relevant_nodes ) {
next ;
}
2010-09-20 17:26:24 +00:00
$ function - > ( nodes = > \ @ relevant_nodes , cluster = > $ hyp , exargs = > \ @ exargs , conn = > $ clusterhash { $ hyp } - > { conn } ) ;
2010-09-17 21:10:58 +00:00
}
2009-06-22 16:00:28 +00:00
}
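# Companion sketch (illustrative; the real dispatch lives elsewhere in this
# plugin): per-hypervisor commands funnel through generic_hyp_operation with a
# worker sub and the leftover client arguments, and the worker receives the
# nodes grouped per hypervisor or per cluster, as migrate() and lsvm() expect.
#   generic_hyp_operation(\&migrate, @exargs);   # rmigrate
#   generic_hyp_operation(\&lsvm);               # lsvm needs no extra arguments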
2010-02-22 20:40:42 +00:00
sub rmhypervisor_disconnected {
my $ task = shift ;
my $ parms = shift ;
my $ node = $ parms - > { node } ;
my $ hyp = $ node ;
my $ state = $ task - > info - > state - > val ;
if ( $ state eq 'success' ) {
my $ task = $ hyphash { $ hyp } - > { hostview } - > Destroy_Task ( ) ;
2011-03-09 15:20:00 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > 'removed' } ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
2010-02-22 20:40:42 +00:00
$ running_tasks { $ task } - > { hyp } = $ hyp ;
} elsif ( $ state eq 'error' ) {
relay_vmware_err ( $ task , "" , $ node ) ;
}
}
sub rmhypervisor_inmaintenance {
my $ task = shift ;
my $ parms = shift ;
my $ state = $ task - > info - > state - > val ;
my $ node = $ parms - > { node } ;
my $ intent = $ parms - > { successtext } ;
if ( $ state eq 'success' ) {
my $ hyp = $ parms - > { node } ;
my $ task = $ hyphash { $ hyp } - > { hostview } - > DisconnectHost_Task ( ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & rmhypervisor_disconnected ;
$ running_tasks { $ task } - > { hyp } = $ hyp ;
$ running_tasks { $ task } - > { data } = { node = > $ hyp } ;
} elsif ( $ state eq 'error' ) {
relay_vmware_err ( $ task , "" , $ node ) ;
}
}
2010-06-08 16:54:13 +00:00
sub lsvm {
    my %args = @_;
    my $hyp = $args{hyp};
    $hyphash{$hyp}->{hostview} = get_hostview(hypname => $hyp, conn => $hyphash{$hyp}->{conn}); #,properties=>['config','configManager']);
    use Data::Dumper;
    my $vms = $hyphash{$hyp}->{hostview}->vm;
    unless ($vms) {
        return;
    }
    foreach (@$vms) {
        my $vmv = $hyphash{$hyp}->{conn}->get_view(mo_ref => $_);
        xCAT::SvrUtils::sendmsg($vmv->name, $output_handler, $hyp);
    }
    return;
}
2011-06-09 20:59:43 +00:00
sub chhypervisor {
my % args = @ _ ;
@ ARGV = @ { $ args { exargs } } ; #for getoptions;
my $ maintenance ;
my $ online ;
my $ stat ;
require Getopt::Long ;
GetOptions (
'maintenance|m' = > \ $ maintenance ,
'online|o' = > \ $ online ,
'show|s' = > \ $ stat ,
) ;
my $ hyp = $ args { hyp } ;
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hyphash { $ hyp } - > { conn } ) ; #,properties=>['config','configManager']);
if ( $ maintenance ) {
if ( defined $ hyphash { $ hyp } - > { hostview } ) {
my $ task = $ hyphash { $ hyp } - > { hostview } - > EnterMaintenanceMode_Task ( timeout = > 0 ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ hyp , successtext = > "hypervisor in maintenance mode" } ;
}
} elsif ( $ online ) {
if ( defined $ hyphash { $ hyp } - > { hostview } ) {
my $ task = $ hyphash { $ hyp } - > { hostview } - > ExitMaintenanceMode_Task ( timeout = > 0 ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ hyp , successtext = > "hypervisor online" } ;
}
} elsif ( $ stat ) {
if ( defined $ hyphash { $ hyp } - > { hostview } ) {
if ( $ hyphash { $ hyp } - > { hostview } - > runtime - > inMaintenanceMode ) {
xCAT::SvrUtils:: sendmsg ( "hypervisor in maintenance mode" , $ output_handler , $ hyp ) ;
} else {
xCAT::SvrUtils:: sendmsg ( "hypervisor online" , $ output_handler , $ hyp ) ;
}
}
}
return ;
}
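# Hedged usage sketch: chhypervisor() recognizes three flags and simply queues
# the matching maintenance-mode task or reports the current state. The
# hypervisor name is a placeholder.
#   chhypervisor(hyp => 'esxi01', exargs => ['-m']);   # enter maintenance mode
#   chhypervisor(hyp => 'esxi01', exargs => ['-o']);   # bring the hypervisor back online
#   chhypervisor(hyp => 'esxi01', exargs => ['-s']);   # show whether it is in maintenance mode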
2010-09-17 13:02:26 +00:00
sub rshutdown { #TODO: refactor with next function too
my % args = @ _ ;
my $ hyp = $ args { hyp } ;
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hyphash { $ hyp } - > { conn } ) ; #,properties=>['config','configManager']);
if ( defined $ hyphash { $ hyp } - > { hostview } ) {
my $ task = $ hyphash { $ hyp } - > { hostview } - > EnterMaintenanceMode_Task ( timeout = > 0 ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & rshutdown_inmaintenance ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ hyp } ;
}
return ;
}
sub rshutdown_inmaintenance {
my $ task = shift ;
my $ parms = shift ;
my $ state = $ task - > info - > state - > val ;
my $ node = $ parms - > { node } ;
my $ intent = $ parms - > { successtext } ;
if ( $ state eq 'success' ) {
my $ hyp = $ parms - > { node } ;
if ( defined $ hyphash { $ hyp } - > { hostview } ) {
my $ task = $ hyphash { $ hyp } - > { hostview } - > ShutdownHost_Task ( force = > 0 ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ hyp ;
$ running_tasks { $ task } - > { data } = { node = > $ hyp , successtext = > "shutdown initiated" } ;
}
} elsif ( $ state eq 'error' ) {
relay_vmware_err ( $ task , "" , $ node ) ;
}
return ;
}
2010-02-22 20:40:42 +00:00
sub rmhypervisor {
my % args = @ _ ;
my $ hyp = $ args { hyp } ;
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hyphash { $ hyp } - > { conn } ) ; #,properties=>['config','configManager']);
if ( defined $ hyphash { $ hyp } - > { hostview } ) {
my $ task = $ hyphash { $ hyp } - > { hostview } - > EnterMaintenanceMode_Task ( timeout = > 0 ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & rmhypervisor_inmaintenance ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ hyp } ;
}
return ;
}
2010-08-31 20:53:55 +00:00
sub clonevms {
my % args = @ _ ;
my $ nodes = $ args { nodes } ;
my $ hyp = $ args { hyp } ;
2010-09-17 21:10:58 +00:00
my $ cluster = $ args { cluster } ;
2010-08-31 20:53:55 +00:00
@ ARGV = @ { $ args { exargs } } ; #for getoptions;
my $ base ;
my $ force ;
my $ detach ;
2012-05-14 21:02:35 +00:00
my $ specialize ;
2010-08-31 20:53:55 +00:00
my $ target ;
require Getopt::Long ;
GetOptions (
'b=s' = > \ $ base ,
'f' = > \ $ force ,
'd' = > \ $ detach ,
2012-05-14 21:02:35 +00:00
'specialize' = > \ $ specialize ,
2010-08-31 20:53:55 +00:00
't=s' = > \ $ target ,
) ;
if ( $ base and $ target ) {
foreach my $ node ( @$ nodes ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Cannot specify both base (-b) and target (-t)" ] , $ output_handler , $ node ) ;
}
return ;
}
unless ( $ base or $ target ) {
foreach my $ node ( @$ nodes ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Must specify one of base (-b) or target (-t)" ] , $ output_handler , $ node ) ;
}
return ;
}
if ( $ target and ( scalar @ { $ nodes } != 1 ) ) {
foreach my $ node ( @$ nodes ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Cannot specify mulitple nodes to create a master from" ] , $ output_handler , $ node ) ;
}
return ;
}
2010-09-17 21:10:58 +00:00
if ( $ hyp ) {
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hyphash { $ hyp } - > { conn } ) ;
}
2010-08-31 20:53:55 +00:00
my $ newdatastores ;
my $ mastername ;
my $ url ;
2010-09-01 20:43:34 +00:00
my $ masterref ;
2010-08-31 20:53:55 +00:00
if ( $ base ) { #if base, we need to pull in the target datastores
my $ mastertab = xCAT::Table - > new ( 'vmmaster' ) ;
2012-07-13 13:42:33 +00:00
$ masterref = $ mastertab - > getNodeAttribs ( $ base , [ qw/storage os arch profile storagemodel nics specializeparameters/ ] ) ;
2010-08-31 20:53:55 +00:00
unless ( $ masterref ) {
foreach my $ node ( @$ nodes ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Cannot find master $base in vmmaster table" ] , $ output_handler , $ node ) ;
}
return ;
}
2010-09-01 20:50:34 +00:00
$ newdatastores - > { $ masterref - > { storage } } = [] ; #make sure that the master datastore is mounted...
2010-08-31 20:53:55 +00:00
foreach ( @$ nodes ) {
my $ url ;
if ( $ tablecfg { vm } - > { $ _ } - > [ 0 ] - > { storage } ) {
$ url = $ tablecfg { vm } - > { $ _ } - > [ 0 ] - > { storage } ;
2012-06-08 13:35:13 +00:00
$ url =~ s/=.*// ;
2010-08-31 20:53:55 +00:00
} else {
$ url = $ masterref - > { storage } ;
}
unless ( $ url ) { die "Shouldn't be possible" ; }
if (ref $newdatastores->{$url}) {
push @ { $ newdatastores - > { $ url } } , $ _ ;
} else {
$ newdatastores - > { $ url } = [ $ _ ] ;
}
}
} elsif ( $ target ) {
2011-05-01 16:46:48 +00:00
if ($target =~ m!/!) { #a slash means the target is a full URL path to the new master's location
2010-08-31 20:53:55 +00:00
$ url = $ target ;
2011-05-01 16:46:48 +00:00
$ url =~ s!/([^/]*)\z!! ;
2010-08-31 20:53:55 +00:00
$ mastername = $ 1 ;
2011-05-01 16:46:48 +00:00
} else {
2011-05-01 16:43:50 +00:00
$ url = $ tablecfg { vm } - > { $ nodes - > [ 0 ] } - > [ 0 ] - > { storage } ;
$ url =~ s/.*\|// ;
$ url =~ s/=(.*)// ;
$ url =~ s/,.*// ;
2011-05-01 16:46:48 +00:00
$mastername = $target;
}
$ newdatastores - > { $ url } = [ $ nodes - > [ 0 ] ] ;
2010-08-31 20:53:55 +00:00
}
2010-09-17 21:10:58 +00:00
if ( $ hyp ) {
unless ( validate_datastore_prereqs ( $ nodes , $ hyp , $ newdatastores ) ) {
return ;
}
} else { #need to build datastore map for cluster
refreshclusterdatastoremap ( $ cluster ) ;
2010-08-31 20:53:55 +00:00
}
2010-09-17 21:10:58 +00:00
sortoutdatacenters ( nodes = > $ nodes , hyp = > $ hyp , cluster = > $ cluster ) ;
2010-08-31 20:53:55 +00:00
if ( $ target ) {
2010-09-17 21:10:58 +00:00
return promote_vm_to_master ( node = > $ nodes - > [ 0 ] , target = > $ target , force = > $ force , detach = > $ detach , cluster = > $ cluster , hyp = > $ hyp , url = > $ url , mastername = > $ mastername ) ;
2010-08-31 20:53:55 +00:00
} elsif ( $ base ) {
2012-05-14 21:02:35 +00:00
return clone_vms_from_master ( nodes = > $ nodes , base = > $ base , detach = > $ detach , cluster = > $ cluster , hyp = > $ hyp , mastername = > $ base , masterent = > $ masterref , specialize = > $ specialize ) ;
2010-08-31 20:53:55 +00:00
}
}
2010-09-17 16:52:55 +00:00
sub sortoutdatacenters { #figure out all the vmfolders for all the nodes passed in
my % args = @ _ ;
my $ nodes = $ args { nodes } ;
my $ hyp = $ args { hyp } ;
2010-09-17 21:10:58 +00:00
my $ cluster = $ args { cluster } ;
2010-09-17 16:52:55 +00:00
my % nondefaultdcs ;
2010-09-17 21:10:58 +00:00
my $ deffolder ;
my $ conn ;
if ( $ hyp ) {
unless ( defined $ hyphash { $ hyp } - > { vmfolder } ) {
$ hyphash { $ hyp } - > { vmfolder } = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ hyphash { $ hyp } - > { conn } - > find_entity_view ( view_type = > 'Datacenter' , properties = > [ 'vmFolder' ] ) - > vmFolder ) ;
}
$ conn = $ hyphash { $ hyp } - > { conn } ;
$ deffolder = $ hyphash { $ hyp } - > { vmfolder } ;
} else { #clustered
unless ( defined $ clusterhash { $ cluster } - > { vmfolder } ) {
$ clusterhash { $ cluster } - > { vmfolder } = $ clusterhash { $ cluster } - > { conn } - > get_view ( mo_ref = > $ clusterhash { $ cluster } - > { conn } - > find_entity_view ( view_type = > 'Datacenter' , properties = > [ 'vmFolder' ] ) - > vmFolder ) ;
}
$ deffolder = $ clusterhash { $ cluster } - > { vmfolder } ;
$ conn = $ clusterhash { $ cluster } - > { conn } ;
2010-09-17 16:52:55 +00:00
}
foreach ( @$ nodes ) {
if ( $ tablecfg { vm } - > { $ _ } - > [ 0 ] - > { datacenter } ) {
$ nondefaultdcs { $ tablecfg { vm } - > { $ _ } - > [ 0 ] - > { datacenter } } - > { $ _ } = 1 ;
} else {
2010-09-17 21:10:58 +00:00
$ vmhash { $ _ } - > { vmfolder } = $ deffolder ;
2010-09-17 16:52:55 +00:00
}
}
my $ datacenter ;
foreach $ datacenter ( keys % nondefaultdcs ) {
2010-09-17 21:10:58 +00:00
my $ vmfolder = $ conn - > get_view ( mo_ref = > $ conn - > find_entity_view ( view_type = > 'Datacenter' , properties = > [ 'vmFolder' ] , filter = > { name = > $ datacenter } ) - > vmFolder , filter = > { name = > $ datacenter } ) ;
2010-09-17 16:52:55 +00:00
foreach ( keys % { $ nondefaultdcs { $ datacenter } } ) {
$ vmhash { $ _ } - > { vmfolder } = $ vmfolder ;
}
}
}
2010-09-01 20:43:34 +00:00
sub clone_vms_from_master {
my % args = @ _ ;
my $ mastername = $ args { mastername } ;
2012-05-14 21:02:35 +00:00
my $ specialize = $ args { specialize } ;
2010-09-01 20:50:34 +00:00
my $ hyp = $ args { hyp } ;
2010-09-17 21:10:58 +00:00
my $ cluster = $ args { cluster } ;
2010-09-03 20:08:41 +00:00
my $ regex = qr/^$mastername\z/ ;
2010-09-01 20:43:34 +00:00
my @ nodes = @ { $ args { nodes } } ;
my $ node ;
2010-09-17 21:10:58 +00:00
my $ conn ;
if ( $ hyp ) {
$ conn = $ hyphash { $ hyp } - > { conn } ;
} else {
$ conn = $ clusterhash { $ cluster } - > { conn } ;
}
my $ masterviews = $ conn - > find_entity_views ( view_type = > 'VirtualMachine' , filter = > { 'config.name' = > $ regex } ) ;
2010-09-01 20:43:34 +00:00
if ( scalar ( @$ masterviews ) != 1 ) {
foreach $ node ( @ nodes ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to find master $mastername in VMWare infrastructure" ] , $ output_handler , $ node ) ;
}
return ;
}
my $ masterview = $ masterviews - > [ 0 ] ;
my $ masterent = $ args { masterent } ;
2012-06-01 02:22:14 +00:00
my $ ostype ;
2010-09-01 20:43:34 +00:00
foreach $ node ( @ nodes ) {
2010-09-01 20:50:34 +00:00
my $ destination = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { storage } ;
2010-09-01 20:43:34 +00:00
my $ nodetypeent ;
my $ vment ;
2012-06-01 02:22:14 +00:00
$ ostype = $ masterent - > { 'os' } ;
2010-09-01 20:43:34 +00:00
foreach ( qw/os arch profile/ ) {
$ nodetypeent - > { $ _ } = $ masterent - > { $ _ } ;
}
foreach ( qw/storagemodel nics/ ) {
$ vment - > { $ _ } = $ masterent - > { $ _ } ;
}
$ vment - > { master } = $ args { mastername } ;
unless ( $ destination ) {
$ destination = $ masterent - > { storage } ;
$ vment - > { storage } = $ destination ;
}
2012-06-08 13:35:13 +00:00
$ destination =~ s/=.*// ;
2010-09-20 19:06:42 +00:00
my $ placement_resources = get_placement_resources ( hyp = > $ hyp , cluster = > $ cluster , destination = > $ destination ) ;
my $ pool = $ placement_resources - > { pool } ;
my $ dstore = $ placement_resources - > { datastore } ;
2010-09-20 19:50:43 +00:00
my % relocatespecargs = (
2010-09-17 21:10:58 +00:00
datastore = > $ dstore , #$hyphash{$hyp}->{datastorerefmap}->{$destination},
pool = > $ pool ,
2010-09-20 19:50:43 +00:00
#diskMoveType=>"createNewChildDiskBacking", #fyi, requires a snapshot, which isn't compatible with templates, moveChildMostDiskBacking would potentially be fine, but either way is ha incopmatible and limited to 8, arbitrary limitations hard to work around...
) ;
2011-04-30 17:01:48 +00:00
unless ( $ args { detach } ) {
$ relocatespecargs { diskMoveType } = "createNewChildDiskBacking" ;
}
2010-09-20 19:50:43 +00:00
if ( $ hyp ) { $ relocatespecargs { host } = $ hyphash { $ hyp } - > { hostview } }
my $ relocatespec = VirtualMachineRelocateSpec - > new ( % relocatespecargs ) ;
2011-04-30 17:01:48 +00:00
my % clonespecargs = (
2010-09-01 20:43:34 +00:00
location = > $ relocatespec ,
template = > 0 ,
powerOn = > 0
) ;
2011-04-30 17:01:48 +00:00
unless ( $ args { detach } ) {
$ clonespecargs { snapshot } = $ masterview - > snapshot - > currentSnapshot ;
}
2012-05-14 21:02:35 +00:00
if ( $ specialize ) {
2012-07-13 13:42:33 +00:00
my % custargs ;
if ( $ masterent - > { specializeparameters } ) { % custargs = ( parameters = > $ masterent - > { specializeparameters } ) ; }
$ clonespecargs { customization } = make_customization_spec ( $ node , ostype = > $ ostype , % custargs ) ;
2012-05-14 21:02:35 +00:00
}
2011-04-30 17:01:48 +00:00
my $ clonespec = VirtualMachineCloneSpec - > new ( % clonespecargs ) ;
2010-09-17 16:52:55 +00:00
my $ vmfolder = $ vmhash { $ node } - > { vmfolder } ;
my $ task = $ masterview - > CloneVM_Task ( folder = > $ vmfolder , name = > $ node , spec = > $ clonespec ) ;
2011-05-01 12:58:24 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , conn = > $ conn , successtext = > 'Successfully cloned from ' . $ args { mastername } ,
mastername = > $ args { mastername } , nodetypeent = > $ nodetypeent , vment = > $ vment ,
hyp = > $ args { hyp } ,
} ;
2010-09-01 20:43:34 +00:00
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & clone_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ; #$hyp_conns->{$hyp};
2010-09-17 21:10:58 +00:00
$ running_tasks { $ task } - > { vm } = $ node ; #$hyp_conns->{$hyp};
2010-09-01 20:43:34 +00:00
}
}
2012-05-14 21:02:35 +00:00
sub make_customization_spec {
my $ node = shift ;
2012-06-01 02:22:14 +00:00
my % args = @ _ ;
2012-05-14 21:02:35 +00:00
my $ password = "Passw0rd" ;
2012-05-17 21:06:53 +00:00
my $ wintimezone ;
2012-05-14 21:02:35 +00:00
#map of number to strings can be found at
#http://osman-shener-en.blogspot.com/2008/02/unattendedtxt-time-zone-index.html
2012-05-18 16:36:16 +00:00
my $ fullname = "Unspecified User" ;
my $ orgName = "Unspecified Organization" ;
if ( $ ::XCATSITEVALS { winfullname } ) { $ fullname = $ ::XCATSITEVALS { winfullname } ; }
2012-05-18 16:54:47 +00:00
if ( $ ::XCATSITEVALS { winorgname } ) { $ orgName = $ ::XCATSITEVALS { winorgname } ; }
2012-05-17 21:06:53 +00:00
my @ runonce = ( ) ; #to be read in from postscripts table
2012-05-18 16:36:16 +00:00
$ wintimezone = xCAT::TZUtils:: get_wintimezonenum ( ) ;
2012-05-17 21:06:53 +00:00
my $ ptab = xCAT::Table - > new ( 'postscripts' , - create = > 0 ) ;
if ( $ ptab ) {
my $ psent = $ ptab - > getNodeAttribs ( $ node , [ qw/postscripts postbootscripts/ ] ) ;
if ( $ psent and $ psent - > { postscripts } ) {
push @ runonce , split /,/ , $ psent - > { postscripts } ;
}
if ( $ psent and $ psent - > { postbootscripts } ) {
push @ runonce , split /,/ , $ psent - > { postbootscripts } ;
}
}
$ ptab = xCAT::Table - > new ( 'passwd' , - create = > 0 ) ;
unless ( $ ptab ) {
die "passwd table needed" ;
}
my ( $ passent ) = $ ptab - > getAttribs ( { "key" = > "system" , username = > "Administrator" } , 'password' ) ;
unless ( $ passent ) {
die "need passwd table entry for system account Administrator" ;
}
$ password = $ passent - > { password } ;
2012-06-01 02:22:14 +00:00
my % lfpd ;
if ( $ args { ostype } and $ args { ostype } =~ /win2k3/ ) {
% lfpd = (
licenseFilePrintData = > CustomizationLicenseFilePrintData - > new (
autoMode = > CustomizationLicenseDataMode - > new (
2012-06-07 19:46:54 +00:00
'perSeat'
2012-06-01 02:22:14 +00:00
)
)
) ;
}
2012-05-17 21:06:53 +00:00
my % runonce ;
if ( scalar @ runonce ) { #skip section if no postscripts or postbootscripts
% runonce = (
guiRunOnce = > CustomizationGuiRunOnce - > new (
2012-05-18 16:54:47 +00:00
commandList = > \ @ runonce ,
)
2012-05-17 21:06:53 +00:00
) ;
}
2012-07-13 13:42:33 +00:00
my % autologonargs = ( autoLogon = > 0 , autoLogonCount = > 1 , ) ;
if ( $ args { parameters } and $ args { parameters } =~ /autoLogonCount=([^,]*)/i ) {
my $ count = $ 1 ;
if ( $ count ) {
$ autologonargs { autoLogon } = 1 ;
$ autologonargs { autoLogonCount } = $ count ;
}
}
2012-05-14 21:02:35 +00:00
my $ identity = CustomizationSysprep - > new (
2012-05-17 21:06:53 +00:00
% runonce ,
2012-06-01 02:22:14 +00:00
% lfpd ,
2012-05-14 21:02:35 +00:00
guiUnattended = > CustomizationGuiUnattended - > new (
2012-07-13 13:42:33 +00:00
% autologonargs ,
2012-05-14 21:02:35 +00:00
password = > CustomizationPassword - > new (
plainText = > 1 ,
value = > $ password ,
) ,
timeZone = > $ wintimezone ,
) ,
identification = > get_customizedidentification ( ) ,
userData = > CustomizationUserData - > new (
computerName = > CustomizationFixedName - > new ( name = > $ node ) ,
fullName = > $ fullname ,
orgName = > $ orgName ,
productId = > "" ,
) ,
) ;
my $ options = CustomizationWinOptions - > new ( changeSID = > 1 , deleteAccounts = > 0 ) ;
my $ customizationspec = CustomizationSpec - > new (
globalIPSettings = > CustomizationGlobalIPSettings - > new ( ) ,
identity = > $ identity ,
nicSettingMap = > [
CustomizationAdapterMapping - > new ( adapter = > CustomizationIPSettings - > new ( ip = > CustomizationDhcpIpGenerator - > new ( ) ) )
] ,
options = > $ options ,
) ;
return $ customizationspec ;
}
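# Note on prerequisites for the sysprep spec above: it pulls the Administrator
# password from the passwd table (key=system, username=Administrator) and turns
# the node's postscripts/postbootscripts into GuiRunOnce commands. Seeding that
# table entry looks something like the line below (the password value is a
# placeholder, not a recommendation):
#   chtab key=system,username=Administrator passwd.password=Cluster4me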
sub get_customizedidentification {
2012-05-18 16:36:16 +00:00
#for now, just do a 'TBD' workgroup. VMWare not supporting joining without domain admin password is rather unfortunate
2012-05-14 21:02:35 +00:00
return CustomizationIdentification - > new (
2012-05-18 16:36:16 +00:00
joinWorkgroup = > "TBD" ,
2012-05-14 21:02:35 +00:00
) ;
}
2010-09-20 19:06:42 +00:00
sub get_placement_resources {
my % args = @ _ ;
my $ pool ;
my $ dstore ;
my $ hyp = $ args { hyp } ;
my $ cluster = $ args { cluster } ;
my $ destination = $ args { destination } ;
if ( $ hyp ) {
unless ( defined $ hyphash { $ hyp } - > { pool } ) {
$ hyphash { $ hyp } - > { pool } = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ hyphash { $ hyp } - > { hostview } - > parent , properties = > [ 'resourcePool' ] ) - > resourcePool ;
}
$ pool = $ hyphash { $ hyp } - > { pool } ;
if ( $ destination ) { $ dstore = $ hyphash { $ hyp } - > { datastorerefmap } - > { $ destination } } ;
} else { #clustered...
unless ( defined $ clusterhash { $ cluster } - > { pool } ) {
my $ cview = get_clusterview ( clustname = > $ cluster , conn = > $ clusterhash { $ cluster } - > { conn } ) ;
$ clusterhash { $ cluster } - > { pool } = $ cview - > resourcePool ;
}
$ pool = $ clusterhash { $ cluster } - > { pool } ;
if ( $ destination ) { $ dstore = $ clusterhash { $ cluster } - > { datastorerefmap } - > { $ destination } } ;
}
return {
pool = > $ pool ,
datastore = > $ dstore ,
}
}
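#Usage sketch (matches the callers later in this module): the returned hashref is consumed as
# my $prsrc = get_placement_resources(hyp => $hyp, cluster => $cluster);
# my $pool  = $prsrc->{pool};      #resource pool to place the VM in
# my $ds    = $prsrc->{datastore}; #only populated when a destination was passed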
2010-09-01 20:43:34 +00:00
sub clone_task_callback {
my $ task = shift ;
my $ parms = shift ;
my $ state = $ task - > info - > state - > val ;
my $ node = $ parms - > { node } ;
2011-05-01 12:31:54 +00:00
my $ conn = $ parms - > { conn } ;
2010-09-01 20:43:34 +00:00
my $ intent = $ parms - > { successtext } ;
if ( $ state eq 'success' ) {
2011-05-01 12:53:16 +00:00
#xCAT::SvrUtils::sendmsg($intent, $output_handler,$node);
2010-09-01 20:43:34 +00:00
my $ nodetype = xCAT::Table - > new ( 'nodetype' , - create = > 1 ) ;
my $ vm = xCAT::Table - > new ( 'vm' , - create = > 1 ) ;
2010-09-01 20:50:34 +00:00
$ vm - > setAttribs ( { node = > $ node } , $ parms - > { vment } ) ;
2011-05-01 12:31:54 +00:00
2010-09-01 20:50:34 +00:00
$ nodetype - > setAttribs ( { node = > $ node } , $ parms - > { nodetypeent } ) ;
2011-05-01 12:53:16 +00:00
foreach ( keys % { $ parms - > { vment } } ) {
2011-05-01 12:58:24 +00:00
$ tablecfg { vm } - > { $ node } - > [ 0 ] - > { $ _ } = $ parms - > { vment } - > { $ _ } ;
2011-05-01 12:31:54 +00:00
}
2011-05-01 12:55:00 +00:00
2011-05-01 12:53:16 +00:00
my @ networks = split /,/ , $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { nics } ;
2011-05-01 12:31:54 +00:00
my @ macs = xCAT::VMCommon:: getMacAddresses ( \ % tablecfg , $ node , scalar @ networks ) ;
#now with macs, change all macs in the vm to match our generated macs
my $ regex = qr/^$node(\z|\.)/ ;
#have to do an expensive pull of the vm view, since it is brand new
my $ nodeviews = $ conn - > find_entity_views ( view_type = > 'VirtualMachine' , filter = > { 'config.name' = > $ regex } ) ;
unless ( scalar @$ nodeviews == 1 ) { die "this should be impossible" ; }
2011-05-01 12:55:00 +00:00
my $ vpdtab = xCAT::Table - > new ( 'vpd' , - create = > 1 ) ;
2011-05-01 12:58:24 +00:00
$ vpdtab - > setAttribs ( { node = > $ node } , { uuid = > $ nodeviews - > [ 0 ] - > config - > uuid } ) ;
2011-05-01 12:53:16 +00:00
my $ ndev ;
my @ devstochange ;
2011-05-01 13:03:54 +00:00
foreach $ ndev ( @ { $ nodeviews - > [ 0 ] - > config - > hardware - > device } ) {
unless ($ndev->{macAddress}) { next; } #not a network device, skip it
$ ndev - > { macAddress } = shift @ macs ;
2012-04-09 14:05:25 +00:00
$ ndev - > { addressType } = "manual" ;
2011-05-01 12:53:16 +00:00
push @ devstochange , VirtualDeviceConfigSpec - > new (
device = > $ ndev ,
operation = > VirtualDeviceConfigSpecOperation - > new ( 'edit' ) ) ;
}
if ( @ devstochange ) {
my $ reconfigspec = VirtualMachineConfigSpec - > new ( deviceChange = > \ @ devstochange ) ;
2011-05-01 12:58:24 +00:00
my $ task = $ nodeviews - > [ 0 ] - > ReconfigVM_Task ( spec = > $ reconfigspec ) ;
2011-05-01 12:53:16 +00:00
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
2011-05-01 12:58:24 +00:00
$ running_tasks { $ task } - > { hyp } = $ parms - > { hyp } ;
2011-05-17 13:57:17 +00:00
$ running_tasks { $ task } - > { conn } = $ parms - > { conn } ;
2011-05-01 12:58:24 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > $ intent } ;
2011-05-01 12:53:16 +00:00
}
2011-05-01 12:31:54 +00:00
2010-09-01 20:43:34 +00:00
} elsif ( $ state eq 'error' ) {
relay_vmware_err ( $ task , "" , $ node ) ;
}
}
2010-08-31 20:53:55 +00:00
sub promote_vm_to_master {
my % args = @ _ ;
my $ node = $ args { node } ;
my $ hyp = $ args { hyp } ;
2010-09-17 21:10:58 +00:00
my $ cluster = $ args { cluster } ;
2010-08-31 20:53:55 +00:00
my $ regex = qr/^$node(\z|\.)/ ;
2010-09-17 21:10:58 +00:00
my $ conn ;
if ( $ hyp ) {
$ conn = $ hyphash { $ hyp } - > { conn } ;
} else {
$ conn = $ clusterhash { $ cluster } - > { conn } ;
}
my $ nodeviews = $ conn - > find_entity_views ( view_type = > 'VirtualMachine' , filter = > { 'config.name' = > $ regex } ) ;
2010-08-31 20:53:55 +00:00
if ( scalar ( @$ nodeviews ) != 1 ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Cannot find $node in VMWare infrastructure" ] , $ output_handler , $ node ) ;
return ;
}
my $ nodeview = shift @$ nodeviews ;
2010-09-17 21:10:58 +00:00
my $ dstore ;
if ( $ hyp ) {
$dstore = $hyphash{$hyp}->{datastorerefmap}->{$args{url}};
} else {
$dstore = $clusterhash{$cluster}->{datastorerefmap}->{$args{url}};
}
2010-08-31 20:53:55 +00:00
my $ relocatespec = VirtualMachineRelocateSpec - > new (
2010-09-17 21:10:58 +00:00
datastore = > $ dstore ,
2010-08-31 20:53:55 +00:00
) ;
my $ clonespec = VirtualMachineCloneSpec - > new (
location = > $ relocatespec ,
2011-04-30 17:25:59 +00:00
template = > 0 , #can't go straight to template, need to clone, then snap, then templatify
2010-08-31 20:53:55 +00:00
powerOn = > 0
) ;
2010-10-01 17:18:37 +00:00
2010-09-17 16:52:55 +00:00
my $ vmfolder = $ vmhash { $ node } - > { vmfolder } ;
my $ task = $ nodeview - > CloneVM_Task ( folder = > $ vmfolder , name = > $ args { mastername } , spec = > $ clonespec ) ;
2011-04-30 17:01:48 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , hyp = > $ args { hyp } , conn = > $ conn , successtext = > 'Successfully copied to ' . $ args { mastername } , mastername = > $ args { mastername } , url = > $ args { url } } ;
2010-08-31 20:53:55 +00:00
$ running_tasks { $ task } - > { task } = $ task ;
2010-09-01 19:43:09 +00:00
$ running_tasks { $ task } - > { callback } = \ & promote_task_callback ;
2010-08-31 20:53:55 +00:00
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ; #$hyp_conns->{$hyp};
2010-09-17 21:10:58 +00:00
$ running_tasks { $ task } - > { vm } = $ node ;
2010-08-31 20:53:55 +00:00
}
2010-09-01 19:43:09 +00:00
sub promote_task_callback {
2011-04-30 17:01:48 +00:00
my $ task = shift ;
my $ parms = shift ;
my $ state = $ task - > info - > state - > val ;
my $ node = $ parms - > { node } ;
my $ intent = $ parms - > { successtext } ;
if ( $ state eq 'success' ) { #now, we have to make one snapshot for linked clones
my $ mastername = $ parms - > { mastername } ;
my $ regex = qr/^$mastername\z/ ;
my $ masterviews = $ parms - > { conn } - > find_entity_views ( view_type = > 'VirtualMachine' , filter = > { 'config.name' = > $ regex } ) ;
unless ( scalar @$ masterviews == 1 ) {
die "Impossible" ;
}
my $ masterview = $ masterviews - > [ 0 ] ;
my $ task = $ masterview - > CreateSnapshot_Task ( name = > "xcatsnap" , memory = > "false" , quiesce = > "false" ) ;
2011-04-30 17:23:56 +00:00
$ parms - > { masterview } = $ masterview ;
2011-04-30 17:01:48 +00:00
$ running_tasks { $ task } - > { data } = $ parms ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & promotesnap_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ parms - > { hyp } ; #$hyp_conns->{$hyp};
$ running_tasks { $ task } - > { vm } = $ parms - > { node } ;
#xCAT::SvrUtils::sendmsg($intent, $output_handler,$node);
} elsif ( $ state eq 'error' ) {
relay_vmware_err ( $ task , "" , $ node ) ;
}
}
sub promotesnap_task_callback {
2010-09-01 19:43:09 +00:00
my $ task = shift ;
my $ parms = shift ;
my $ state = $ task - > info - > state - > val ;
my $ node = $ parms - > { node } ;
my $ intent = $ parms - > { successtext } ;
if ( $ state eq 'success' ) {
2011-04-30 17:23:56 +00:00
$ parms - > { masterview } - > MarkAsTemplate ; #time to be a template
2010-09-01 19:43:09 +00:00
xCAT::SvrUtils:: sendmsg ( $ intent , $ output_handler , $ node ) ;
my $ mastertabentry = {
originator = > $ requester ,
2010-09-03 18:51:19 +00:00
vintage = > scalar ( localtime ) ,
2010-09-01 19:43:09 +00:00
storage = > $ parms - > { url } ,
} ;
foreach ( qw/os arch profile/ ) {
if ( defined ( $ tablecfg { nodetype } - > { $ node } - > [ 0 ] - > { $ _ } ) ) {
$ mastertabentry - > { $ _ } = $ tablecfg { nodetype } - > { $ node } - > [ 0 ] - > { $ _ } ;
}
}
foreach ( qw/storagemodel nics/ ) {
if ( defined ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { $ _ } ) ) {
$ mastertabentry - > { $ _ } = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { $ _ } ;
}
}
my $ vmmastertab = xCAT::Table - > new ( 'vmmaster' , - create = > 1 ) ;
my $ date = scalar ( localtime ) ;
2010-09-01 20:50:34 +00:00
$ vmmastertab - > setAttribs ( { name = > $ parms - > { mastername } } , $ mastertabentry ) ;
2010-09-01 19:43:09 +00:00
} elsif ( $ state eq 'error' ) {
relay_vmware_err ( $ task , "" , $ node ) ;
}
}
2009-06-22 16:00:28 +00:00
sub mkvms {
my % args = @ _ ;
my $ nodes = $ args { nodes } ;
my $ hyp = $ args { hyp } ;
2010-09-20 17:26:24 +00:00
my $ cluster = $ args { cluster } ;
2009-06-22 16:00:28 +00:00
@ ARGV = @ { $ args { exargs } } ; #for getoptions;
my $ disksize ;
require Getopt::Long ;
2010-09-17 14:22:17 +00:00
my $ cpuCount ;
my $ memory ;
2009-06-22 16:00:28 +00:00
GetOptions (
2010-09-17 14:22:17 +00:00
'size|s=s' = > \ $ disksize ,
"cpus=s" = > \ $ cpuCount ,
"mem=s" = > \ $ memory
2009-06-22 16:00:28 +00:00
) ;
my $ node ;
2010-09-20 19:06:42 +00:00
my $ conn ;
2010-09-20 17:26:24 +00:00
if ( $ hyp ) {
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hyphash { $ hyp } - > { conn } ) ; #,properties=>['config','configManager']);
unless ( validate_datastore_prereqs ( $ nodes , $ hyp ) ) {
return ;
}
2010-09-20 19:06:42 +00:00
$ conn = $ hyphash { $ hyp } - > { conn } ;
} else {
refreshclusterdatastoremap ( $ cluster ) ;
$ conn = $ clusterhash { $ cluster } - > { conn } ;
2009-06-22 16:00:28 +00:00
}
2010-09-20 17:26:24 +00:00
sortoutdatacenters ( nodes = > $ nodes , hyp = > $ hyp , cluster = > $ cluster ) ;
2010-09-20 19:06:42 +00:00
my $ placement_resources = get_placement_resources ( hyp = > $ hyp , cluster = > $ cluster ) ;
#$hyphash{$hyp}->{pool} = $hyphash{$hyp}->{conn}->get_view(mo_ref=>$hyphash{$hyp}->{hostview}->parent,properties=>['resourcePool'])->resourcePool;
2009-06-22 16:00:28 +00:00
my $ cfg ;
foreach $ node ( @$ nodes ) {
2010-07-08 15:19:18 +00:00
process_tasks; #check for tasks needing followup actions before the task is forgotten (VMware's memory of completed tasks is fairly short at times)
2010-09-20 19:06:42 +00:00
if ( $ conn - > find_entity_view ( view_type = > "VirtualMachine" , filter = > { name = > $ node } ) ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Virtual Machine already exists" ] , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
next ;
} else {
2010-09-20 19:06:42 +00:00
register_vm ( $ hyp , $ node , $ disksize , undef , undef , undef , cpus = > $ cpuCount , memory = > $ memory , cluster = > $ cluster ) ;
2009-06-22 16:00:28 +00:00
}
}
my @ dhcpnodes ;
foreach ( keys % { $ tablecfg { dhcpneeded } } ) {
push @ dhcpnodes , $ _ ;
delete $ tablecfg { dhcpneeded } - > { $ _ } ;
}
2010-10-20 18:41:08 +00:00
unless ( $ ::XCATSITEVALS { 'dhcpsetup' } and ( $ ::XCATSITEVALS { 'dhcpsetup' } =~ /^n/i or $ ::XCATSITEVALS { 'dhcpsetup' } =~ /^d/i or $ ::XCATSITEVALS { 'dhcpsetup' } eq '0' ) ) {
$ executerequest - > ( { command = > [ 'makedhcp' ] , node = > \ @ dhcpnodes } ) ;
}
2009-06-22 16:00:28 +00:00
}
sub setboot {
my % args = @ _ ;
my $ node = $ args { node } ;
my $ hyp = $ args { hyp } ;
if ( not defined $ args { vmview } ) { #attempt one refresh
2010-09-17 21:10:58 +00:00
$ args { vmview } = $ vmhash { $ node } - > { conn } - > find_entity_view ( view_type = > 'VirtualMachine' , properties = > [ 'config.name' ] , filter = > { name = > $ node } ) ;
2010-07-29 20:03:05 +00:00
if ( not defined $ args { vmview } ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "VM does not appear to exist" ] , $ output_handler , $ node ) ;
2010-07-29 20:03:05 +00:00
return ;
}
2009-06-22 16:00:28 +00:00
}
my $ bootorder = $ { $ args { exargs } } [ 0 ] ;
#NOTE: VMware simply does not currently seem to allow programmatically changing the boot
2009-07-13 18:04:39 +00:00
#order like other virtualization solutions supported by xCAT.
2009-06-22 16:00:28 +00:00
#This doesn't behave quite like any existing mechanism:
#vm.bootorder was meant to take the place of system nvram; VMware imitates that unfortunate aspect of bare metal too well.
#rsetboot was created to describe the IPMI scenario of a transient boot device; here the setting is persistent *except* for setup, which is not.
#rbootseq was meant to be entirely persistent and ordered.
#rsetboot is the command used here, since its usage scenario matches this behavior about as well as any of the alternatives.
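#Illustrative example: 'rsetboot <node> net,hd' arrives here as a bootorder of
#'net,hd' and is stored as the extraConfig option
#  bios.bootDeviceClasses = "allow:net,hd"
#while a bootorder of 'setup' instead requests BIOS setup on the next boot.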
my $ reconfigspec ;
if ( $ bootorder =~ /setup/ ) {
unless ( $ bootorder eq 'setup' ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "rsetboot parameter may not contain 'setup' with other items, assuming vm.bootorder is just 'setup'" ] , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
}
$ reconfigspec = VirtualMachineConfigSpec - > new (
bootOptions = > VirtualMachineBootOptions - > new ( enterBIOSSetup = > 1 ) ,
) ;
} else {
$ bootorder = "allow:" . $ bootorder ;
$ reconfigspec = VirtualMachineConfigSpec - > new (
bootOptions = > VirtualMachineBootOptions - > new ( enterBIOSSetup = > 0 ) ,
extraConfig = > [ OptionValue - > new ( key = > 'bios.bootDeviceClasses' , value = > $ bootorder ) ]
) ;
}
my $ task = $ args { vmview } - > ReconfigVM_Task ( spec = > $ reconfigspec ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > $ { $ args { exargs } } [ 0 ] } ;
}
sub register_vm { #Attempt to register existing instance of a VM
my $ hyp = shift ;
my $ node = shift ;
my $ disksize = shift ;
my $ blockedfun = shift ; #a pointer to a blocked function to call on success
my $ blockedargs = shift ; #hash reference to call blocked function with
2010-06-12 01:57:12 +00:00
my $ failonerr = shift ;
2010-09-17 14:22:17 +00:00
my % args = @ _ ; #ok, went overboard with positional arguments, from now on, named arguments
2009-06-22 16:00:28 +00:00
my $ task ;
2010-09-20 19:06:42 +00:00
if ( $ hyp ) {
validate_network_prereqs ( [ keys % { $ hyphash { $ hyp } - > { nodes } } ] , $ hyp ) ;
unless ( defined $ hyphash { $ hyp } - > { datastoremap } or validate_datastore_prereqs ( [ keys % { $ hyphash { $ hyp } - > { nodes } } ] , $ hyp ) ) {
die "unexpected condition" ;
}
} else {
scan_cluster_networks ( $ args { cluster } ) ;
2009-06-22 16:00:28 +00:00
}
2010-09-20 19:06:42 +00:00
sortoutdatacenters ( nodes = > [ $ node ] , hyp = > $ hyp , cluster = > $ args { cluster } ) ;
my $ placement_resources = get_placement_resources ( hyp = > $ hyp , cluster = > $ args { cluster } ) ;
2010-07-09 06:24:18 +00:00
# Try to add an existing VM to the machine folder
2009-06-22 16:00:28 +00:00
my $ success = eval {
2010-09-20 19:06:42 +00:00
if ( $ hyp ) {
2011-06-09 20:16:23 +00:00
$ task = $ vmhash { $ node } - > { vmfolder } - > RegisterVM_Task ( path = > getcfgdatastore ( $ node , $ hyphash { $ hyp } - > { datastoremap } ) . " /$node/$node.vmx" , name = > $ node , pool = > $ hyphash { $ hyp } - > { pool } , host = > $ hyphash { $ hyp } - > { hostview } , asTemplate = > 0 ) ;
2010-09-20 19:06:42 +00:00
} else {
$ task = $ vmhash { $ node } - > { vmfolder } - > RegisterVM_Task ( path = > getcfgdatastore ( $ node , $ clusterhash { $ args { cluster } } - > { datastoremap } ) . " /$node/$node.vmx" , name = > $ node , pool = > $ placement_resources - > { pool } , asTemplate = > 0 ) ;
}
2009-06-22 16:00:28 +00:00
} ;
2010-07-09 06:24:18 +00:00
# if we couldn't add it then it means it wasn't created yet. So we create it.
2010-09-20 19:06:42 +00:00
my $ cluster = $ args { cluster } ;
2009-06-22 16:00:28 +00:00
if ( $@ or not $ success ) {
2010-07-09 06:24:18 +00:00
#if (ref($@) eq 'SoapFault') {
# if (ref($@->detail) eq 'NotFound') {
2009-06-22 16:00:28 +00:00
register_vm_callback ( undef , {
node = > $ node ,
disksize = > $ disksize ,
blockedfun = > $ blockedfun ,
blockedargs = > $ blockedargs ,
2010-06-12 01:57:12 +00:00
errregister = > $ failonerr ,
2010-09-17 14:22:17 +00:00
cpus = > $ args { cpus } ,
memory = > $ args { memory } ,
2010-09-20 19:06:42 +00:00
hyp = > $ hyp ,
cluster = > $ cluster ,
2009-06-22 16:00:28 +00:00
} ) ;
}
if ( $ task ) {
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & register_vm_callback ;
$ running_tasks { $ task } - > { hyp } = $ hyp ;
2010-09-20 19:06:42 +00:00
$ running_tasks { $ task } - > { cluster } = $ cluster ;
2009-06-22 16:00:28 +00:00
$ running_tasks { $ task } - > { data } = {
node = > $ node ,
disksize = > $ disksize ,
blockedfun = > $ blockedfun ,
blockedargs = > $ blockedargs ,
2010-06-12 01:57:12 +00:00
errregister = > $ failonerr ,
2010-09-17 14:22:17 +00:00
cpus = > $ args { cpus } ,
memory = > $ args { memory } ,
2010-09-20 19:06:42 +00:00
hyp = > $ hyp ,
cluster = > $ cluster ,
2009-06-22 16:00:28 +00:00
} ;
}
}
sub register_vm_callback {
my $ task = shift ;
my $ args = shift ;
if ( not $ task or $ task - > info - > state - > val eq 'error' ) { #TODO: fail for 'rpower' flow, mkvm is too invasive in VMWare to be induced by 'rpower on'
if ( not defined $ args - > { blockedfun } ) {
2010-09-17 14:22:17 +00:00
mknewvm ( $ args - > { node } , $ args - > { disksize } , $ args - > { hyp } , $ args ) ;
2010-06-12 01:57:12 +00:00
} elsif ( $ args - > { errregister } ) {
relay_vmware_err ( $ task , "" , $ args - > { node } ) ;
2009-06-22 16:00:28 +00:00
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "mkvm must be called before use of this function" ] , $ output_handler , $ args - > { node } ) ;
2009-06-22 16:00:28 +00:00
}
} elsif ( defined $ args - > { blockedfun } ) { #If there is a blocked function, call it here)
$ args - > { blockedfun } - > ( % { $ args - > { blockedargs } } ) ;
}
}
2010-07-09 06:24:18 +00:00
sub getURI {
my $ method = shift ;
my $ location = shift ;
my $ uri = '' ;
if ( $ method =~ /nfs/ ) {
( my $ server , my $ path ) = split /\// , $ location , 2 ;
$ server =~ s/:$// ; #tolerate habitual colons
my $ servern = inet_aton ( $ server ) ;
unless ( $ servern ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "could not resolve '$server' to an address from vm.storage/vm.cfgstore" ] , $ output_handler ) ;
2010-07-09 06:24:18 +00:00
}
$ server = inet_ntoa ( $ servern ) ;
$ uri = "nfs://$server/$path" ;
} elsif ( $ method =~ /vmfs/ ) {
( my $ name , undef ) = split /\// , $ location , 2 ;
$ name =~ s/:$// ; #remove a : if someone put it in for some reason.
$ uri = "vmfs://$name" ;
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unsupported VMware Storage Method: $method. Please use 'vmfs or nfs'" ] , $ output_handler ) ;
2010-07-09 06:24:18 +00:00
}
return $ uri ;
}
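#Worked examples (hypothetical values):
#  getURI('nfs', '172.16.0.1:/vms') -> 'nfs://172.16.0.1/vms'  (habitual colon stripped, name resolved to an address)
#  getURI('vmfs', 'datastore1')     -> 'vmfs://datastore1'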
2009-06-22 16:00:28 +00:00
sub getcfgdatastore {
my $ node = shift ;
my $ dses = shift ;
2009-07-10 19:08:06 +00:00
my $ cfgdatastore = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { cfgstore } ;
unless ( $ cfgdatastore ) {
$ cfgdatastore = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { storage } ;
#TODO: if multiple drives are specified, make sure to split this out
2009-09-28 18:25:12 +00:00
#DONE: I believe the regex after this conditional takes care of that case already..
2009-07-10 19:08:06 +00:00
}
2010-03-24 12:08:21 +00:00
$ cfgdatastore =~ s/=.*// ;
2010-01-26 15:44:00 +00:00
( my $ method , my $ location ) = split /:\/\// , $ cfgdatastore , 2 ;
2010-09-20 19:06:42 +00:00
my $ uri = $ cfgdatastore ;
unless ( $ dses - > { $ uri } ) { #don't call getURI if map works out fine already
$ uri = getURI ( $ method , $ location ) ;
}
2010-01-26 15:44:00 +00:00
$ cfgdatastore = "[" . $ dses - > { $ uri } . "]" ;
#$cfgdatastore =~ s/,.*$//; #these two lines of code were kinda pointless
#$cfgdatastore =~ s/\/$//;
2009-06-22 16:00:28 +00:00
return $ cfgdatastore ;
}
sub mknewvm {
my $ node = shift ;
my $ disksize = shift ;
my $ hyp = shift ;
2010-09-17 14:22:17 +00:00
my $ otherargs = shift ;
2010-09-20 19:06:42 +00:00
my $ cluster = $ otherargs - > { cluster } ;
my $ placement_resources = get_placement_resources ( hyp = > $ hyp , cluster = > $ cluster ) ;
my $ pool = $ placement_resources - > { pool } ;
my $ cfg ;
if ( $ hyp ) {
$ cfg = build_cfgspec ( $ node , $ hyphash { $ hyp } - > { datastoremap } , $ hyphash { $ hyp } - > { nets } , $ disksize , $ hyp , $ otherargs ) ;
} else { #cluster based..
$ cfg = build_cfgspec ( $ node , $ clusterhash { $ cluster } - > { datastoremap } , $ clusterhash { $ cluster } - > { nets } , $ disksize , $ hyp , $ otherargs ) ;
}
2010-09-17 21:10:58 +00:00
my $ task ;
if ( $ hyp ) {
$ task = $ vmhash { $ node } - > { vmfolder } - > CreateVM_Task ( config = > $ cfg , pool = > $ hyphash { $ hyp } - > { pool } , host = > $ hyphash { $ hyp } - > { hostview } ) ;
} else {
2010-09-20 19:06:42 +00:00
$ task = $ vmhash { $ node } - > { vmfolder } - > CreateVM_Task ( config = > $ cfg , pool = > $ pool ) ; #drs away
2010-09-17 21:10:58 +00:00
}
2009-06-22 16:00:28 +00:00
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & mkvm_callback ;
$ running_tasks { $ task } - > { hyp } = $ hyp ;
2010-09-20 19:06:42 +00:00
$ running_tasks { $ task } - > { cluster } = $ cluster ;
$ running_tasks { $ task } - > { data } = { hyp = > $ hyp , cluster = > $ cluster , node = > $ node } ;
2009-06-22 16:00:28 +00:00
}
sub getUnits {
my $ amount = shift ;
my $ defunit = shift ;
my $ divisor = shift ;
unless ( $ amount ) { return ; }
unless ( $ divisor ) {
$ divisor = 1 ;
}
if ($amount =~ /(\D)$/) { #a trailing unit letter on the amount overrides the default unit
$ defunit = $ 1 ;
chop $ amount ;
}
if ( $ defunit =~ /k/i ) {
return $ amount * 1024 / $ divisor ;
} elsif ( $ defunit =~ /m/i ) {
return $ amount * 1048576 / $ divisor ;
} elsif ( $ defunit =~ /g/i ) {
return $ amount * 1073741824 / $ divisor ;
}
}
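#Worked examples:
#  getUnits('4G', 'G', 1024)     -> 4*1073741824/1024 = 4194304  (4 GB expressed in KB)
#  getUnits('512', 'M', 1048576) -> 512*1048576/1048576 = 512    (bare number treated as MB, returned in MB)
#A trailing unit letter on the amount itself (e.g. '2048M') overrides the default unit.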
sub getguestid {
my $ osfound = 0 ;
my $ node = shift ;
2010-11-09 19:01:33 +00:00
if ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { guestostype } ) { #if admin wants to skip derivation from nodetype.os value, let em
return $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { guestostype } ;
}
2009-06-22 16:00:28 +00:00
my $ nodeos = $ tablecfg { nodetype } - > { $ node } - > [ 0 ] - > { os } ;
my $ nodearch = $ tablecfg { nodetype } - > { $ node } - > [ 0 ] - > { arch } ;
foreach ( keys % guestidmap ) {
2010-07-09 06:24:18 +00:00
if ( defined ( $ nodeos ) and $ nodeos =~ /$_/ ) {
2009-06-22 16:00:28 +00:00
if ( $ nodearch eq 'x86_64' ) {
$ nodeos = $ guestidmap { $ _ } . "64Guest" ;
} else {
$ nodeos = $ guestidmap { $ _ } ;
$ nodeos =~ s/_$// ;
$ nodeos . = "Guest" ;
}
$ osfound = 1 ;
last ;
}
}
unless ( $ osfound ) {
2010-07-09 06:24:18 +00:00
if ( defined ( $ nodearch ) and $ nodearch eq 'x86_64' ) {
2009-06-22 16:00:28 +00:00
$ nodeos = "otherGuest64" ;
} else {
$ nodeos = "otherGuest" ;
}
}
return $ nodeos ;
}
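#Illustrative example (assuming a hypothetical %guestidmap entry 'rhel' => 'rhel6_'):
#  nodetype.os 'rhel6', arch 'x86_64' -> 'rhel6_64Guest'
#  nodetype.os 'rhel6', other arch    -> 'rhel6Guest'  (trailing '_' stripped, 'Guest' appended)
#  unrecognized os on x86_64          -> 'otherGuest64'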
sub build_cfgspec {
my $ node = shift ;
my $ dses = shift ; #map to match vm table to datastore names
my $ netmap = shift ;
my $ disksize = shift ;
2009-07-17 14:18:25 +00:00
my $ hyp = shift ;
2010-09-17 14:22:17 +00:00
my $ otherargs = shift ;
2009-06-22 16:00:28 +00:00
my $ memory ;
my $ ncpus ;
2010-09-17 14:22:17 +00:00
my $ updatehash ;
if ( $ otherargs - > { memory } ) {
2010-09-17 14:36:27 +00:00
$ memory = getUnits ( $ otherargs - > { memory } , "M" , 1048576 ) ;
2010-09-17 14:22:17 +00:00
if ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { memory } ) {
$ updatehash - > { memory } = $ memory ;
}
} elsif ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { memory } ) {
2010-09-17 14:36:27 +00:00
$ memory = getUnits ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { memory } , "M" , 1048576 ) ;
2010-09-17 14:22:17 +00:00
} else {
2009-06-22 16:00:28 +00:00
$ memory = 512 ;
}
2010-09-17 14:22:17 +00:00
if ( $ otherargs - > { cpus } ) {
$ ncpus = $ otherargs - > { cpus } ;
if ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { cpus } ) {
$ updatehash - > { cpus } = $ ncpus ;
}
} elsif ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { cpus } ) {
$ ncpus = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { cpus } ;
} else {
2009-06-22 16:00:28 +00:00
$ ncpus = 1 ;
}
2010-09-17 14:22:17 +00:00
if ( $ updatehash ) {
my $ vmtab = xCAT::Table - > new ( 'vm' , - create = > 1 ) ;
$ vmtab - > setNodeAttribs ( $ node , $ updatehash ) ;
}
2009-06-22 16:00:28 +00:00
my @ devices ;
$ currkey = 0 ;
2011-05-10 14:22:03 +00:00
my $ opticalbacking = VirtualCdromRemoteAtapiBackingInfo - > new ( deviceName = > "" ) ;
my $ opticalconnectable = VirtualDeviceConnectInfo - > new ( startConnected = > 0 , allowGuestControl = > 1 , connected = > 0 ) ;
my $ optical = VirtualCdrom - > new ( controllerKey = > 201 ,
connectable = > $ opticalconnectable ,
backing = > $ opticalbacking ,
key = > $ currkey + + ,
unitNumber = > 0 , ) ;
push @ devices , VirtualDeviceConfigSpec - > new ( device = > $ optical , operation = > VirtualDeviceConfigSpecOperation - > new ( 'add' ) ) ;
2009-06-22 16:00:28 +00:00
push @ devices , create_storage_devs ( $ node , $ dses , $ disksize ) ;
2009-07-17 14:18:25 +00:00
push @ devices , create_nic_devs ( $ node , $ netmap , $ hyp ) ;
2009-07-13 20:10:08 +00:00
#my $cfgdatastore = $tablecfg{vm}->{$node}->[0]->{storage}; #TODO: need a new cfglocation field in case of stateless guest?
#$cfgdatastore =~ s/,.*$//;
#$cfgdatastore =~ s/\/$//;
#$cfgdatastore = "[".$dses->{$cfgdatastore}."]";
my $ cfgdatastore = getcfgdatastore ( $ node , $ dses ) ;
2009-06-22 16:00:28 +00:00
my $ vfiles = VirtualMachineFileInfo - > new ( vmPathName = > $ cfgdatastore ) ;
#my $nodeos = $tablecfg{nodetype}->{$node}->[0]->{os};
#my $nodearch = $tablecfg{nodetype}->{$node}->[0]->{arch};
my $ nodeos = getguestid ( $ node ) ; #nodeos=>$nodeos,nodearch=>$nodearch);
2010-06-23 14:50:46 +00:00
my $ uuid ;
if ( $ tablecfg { vpd } - > { $ node } - > [ 0 ] - > { uuid } ) {
$ uuid = $ tablecfg { vpd } - > { $ node } - > [ 0 ] - > { uuid } ;
} else {
if ( $ tablecfg { mac } - > { $ node } - > [ 0 ] - > { mac } ) { #a uuidv1 is possible, generate that for absolute uniqueness guarantee
my $ mac = $ tablecfg { mac } - > { $ node } - > [ 0 ] - > { mac } ;
$ mac =~ s/\|.*// ;
$ mac =~ s/!.*// ;
$ uuid = xCAT::Utils:: genUUID ( mac = > $ mac ) ;
} else {
$ uuid = xCAT::Utils:: genUUID ( ) ;
}
2010-07-09 06:24:18 +00:00
2010-06-23 14:50:46 +00:00
my $ vpdtab = xCAT::Table - > new ( 'vpd' ) ;
2010-07-09 06:24:18 +00:00
$ vpdtab - > setNodeAttribs ( $ node , { uuid = > $ uuid } ) ;
2010-06-23 14:50:46 +00:00
}
2012-03-06 20:17:47 +00:00
$ uuid =~ s/^(..)(..)(..)(..)-(..)(..)-(..)(..)/$4$3$2$1-$6$5-$8$7/ ;
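#The substitution above byte-swaps the first three UUID fields, e.g. (hypothetical value)
#  '00112233-4455-6677-8899-aabbccddeeff' -> '33221100-5544-7766-8899-aabbccddeeff'
#apparently to reconcile the endianness of the UUID as VMware presents it to the
#guest with the canonical form stored in the vpd table.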
2011-02-08 21:33:06 +00:00
my @ optionvals ;
if ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { othersettings } ) {
my $ key ;
my $ value ;
foreach ( split /;/ , $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { othersettings } ) {
( $ key , $ value ) = split /=/ ;
if ( $ value ) {
push @ optionvals , OptionValue - > new ( key = > $ key , value = > $ value ) ;
} else {
push @ optionvals , OptionValue - > new ( key = > $ key ) ;
}
}
}
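#Example (hypothetical keys): vm.othersettings of
#  'isolation.tools.copy.disable=true;log.rotateSize=100000'
#becomes two OptionValue extraConfig entries; a key given without '=value' is
#passed through with no value.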
my % specargs = (
2009-06-22 16:00:28 +00:00
name = > $ node ,
files = > $ vfiles ,
guestId = > $ nodeos ,
memoryMB = > $ memory ,
numCPUs = > $ ncpus ,
deviceChange = > \ @ devices ,
2010-06-23 14:50:46 +00:00
uuid = > $ uuid ,
2011-02-08 21:33:06 +00:00
) ;
if ( @ optionvals ) {
$ specargs { extraConfig } = \ @ optionvals ;
}
return VirtualMachineConfigSpec - > new ( % specargs ) ;
2009-06-22 16:00:28 +00:00
}
sub create_nic_devs {
my $ node = shift ;
my $ netmap = shift ;
2009-07-17 14:18:25 +00:00
my $ hyp = shift ;
2009-06-22 16:00:28 +00:00
my @ networks = split /,/ , $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { nics } ;
my @ devs ;
my $ idx = 0 ;
2009-08-19 15:14:38 +00:00
my @ macs = xCAT::VMCommon:: getMacAddresses ( \ % tablecfg , $ node , scalar @ networks ) ;
2009-06-22 16:00:28 +00:00
my $ connprefs = VirtualDeviceConnectInfo - > new (
allowGuestControl = > 1 ,
connected = > 0 ,
startConnected = > 1
) ;
2010-09-03 18:54:30 +00:00
my $ model = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { nicmodel } ;
2010-09-01 18:12:41 +00:00
unless ( $ model ) {
$ model = 'e1000' ;
}
2009-06-22 16:00:28 +00:00
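#Illustrative vm.nics entries (hypothetical names) as parsed below:
#  'vmnet0'              -> port group 'vmnet0', default adapter model
#  'vmnet0=vmxnet3'      -> same port group, vmxnet3 adapter
#  'vmnic0&vmnic1:prod'  -> port group derived from 'prod' on a vswitch spanning vmnic0+vmnic1
#                           (switch/port group mapping is established in validate_network_prereqs)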
foreach ( @ networks ) {
2010-09-20 19:06:42 +00:00
my $ pgname = $ _ ;
if ( $ hyp ) {
$ pgname = $ hyphash { $ hyp } - > { pgnames } - > { $ _ } ;
}
2009-06-22 16:00:28 +00:00
s/.*:// ;
2010-09-01 18:12:41 +00:00
s/=(.*)$// ;
my $ tmpmodel = $ model ;
if ( $ 1 ) { $ tmpmodel = $ 1 ; }
2009-06-22 16:00:28 +00:00
my $ netname = $ _ ;
my $ backing = VirtualEthernetCardNetworkBackingInfo - > new (
2009-07-17 14:18:25 +00:00
network = > $ netmap - > { $ pgname } ,
deviceName = > $ pgname ,
2009-06-22 16:00:28 +00:00
) ;
2010-09-01 18:12:41 +00:00
my % newcardargs = (
2009-06-22 16:00:28 +00:00
key = > 0 , #3, #$currkey++,
backing = > $ backing ,
addressType = > "manual" ,
macAddress = > shift @ macs ,
connectable = > $ connprefs ,
wakeOnLanEnabled = > 1 , #TODO: configurable in tables?
) ;
2010-09-01 18:12:41 +00:00
my $ newcard ;
if ( $ tmpmodel eq 'e1000' ) {
$ newcard = VirtualE1000 - > new ( % newcardargs ) ;
} elsif ( $ tmpmodel eq 'vmxnet3' ) {
$ newcard = VirtualVmxnet3 - > new ( % newcardargs ) ;
} elsif ( $ tmpmodel eq 'pcnet32' ) {
$ newcard = VirtualPCNet32 - > new ( % newcardargs ) ;
} elsif ( $ tmpmodel eq 'vmxnet2' ) {
$ newcard = VirtualVmxnet2 - > new ( % newcardargs ) ;
} elsif ( $ tmpmodel eq 'vmxnet' ) {
$ newcard = VirtualVmxnet - > new ( % newcardargs ) ;
} else {
xCAT::SvrUtils:: sendmsg ( [ 1 , "$tmpmodel not a recognized nic type, falling back to e1000 (vmxnet3, e1000, pcnet32, vmxnet2, vmxnet are recognized" ] , $ output_handler , $ node ) ;
$ newcard = VirtualE1000 - > new ( % newcardargs ) ;
}
2009-06-22 16:00:28 +00:00
push @ devs , VirtualDeviceConfigSpec - > new ( device = > $ newcard ,
operation = > VirtualDeviceConfigSpecOperation - > new ( 'add' ) ) ;
$ idx + + ;
}
return @ devs ;
die "Stop running for test" ;
}
sub create_storage_devs {
my $ node = shift ;
my $ sdmap = shift ;
2009-09-28 17:59:43 +00:00
my $ sizes = shift ;
my @ sizes = split /[,:]/ , $ sizes ;
2010-06-07 21:24:22 +00:00
my $ existingScsiCont = shift ;
my $ scsiUnit = shift ;
my $ existingIdeCont = shift ;
my $ ideUnit = shift ;
2010-06-17 21:10:39 +00:00
my $ devices = shift ;
2010-08-26 19:23:25 +00:00
my % args = @ _ ;
2009-06-22 16:00:28 +00:00
my $ scsicontrollerkey = 0 ;
my $ idecontrollerkey = 200 ; #IDE 'controllers' exist at 200 and 201 invariably, with no flexibility?
#Cannot find documentation that declares this as absolute, but attempts to do otherwise
#lead to failure; also of note, these are single-channel controllers, so two devices per controller
my $ backingif ;
my @ devs ;
my $ havescsidevs = 0 ;
my $ disktype = 'ide' ;
2010-06-07 21:24:22 +00:00
my $ ideunitnum = 0 ;
my $ scsiunitnum = 0 ;
2010-06-08 13:22:33 +00:00
my $ havescsicontroller = 0 ;
2010-06-17 21:10:39 +00:00
my % usedideunits ;
my % usedscsiunits = ( 7 = > 1 , '7' = > 1 ) ;
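#(unit 7 is pre-marked as used above because the SCSI controller itself occupies target 7)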
2010-06-07 21:24:22 +00:00
if ( defined $ existingScsiCont ) {
2010-06-08 13:22:33 +00:00
$ havescsicontroller = 1 ;
2010-06-07 21:24:22 +00:00
$ scsicontrollerkey = $ existingScsiCont - > { key } ;
$ scsiunitnum = $ scsiUnit ;
2010-06-17 21:10:39 +00:00
% usedscsiunits = % { getUsedUnits ( $ scsicontrollerkey , $ devices ) } ;
2010-06-07 21:24:22 +00:00
}
if ( defined $ existingIdeCont ) {
$ idecontrollerkey = $ existingIdeCont - > { key } ;
$ ideunitnum = $ ideUnit ;
2010-06-17 21:10:39 +00:00
% usedideunits = % { getUsedUnits ( $ idecontrollerkey , $ devices ) } ;
2010-06-07 21:24:22 +00:00
}
my $ unitnum ;
2009-06-22 16:00:28 +00:00
my % disktocont ;
my $ dev ;
2009-09-28 18:25:12 +00:00
my @ storelocs = split /,/ , $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { storage } ;
2010-08-25 15:42:36 +00:00
my $ globaldisktype = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { storagemodel } ;
unless ( $ globaldisktype ) { $ globaldisktype = 'ide' ; }
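#Illustrative vm.storage values (hypothetical servers/datastores) as consumed below:
#  'nfs://172.16.0.1/vms'                 -> disks on that NFS datastore, type from vm.storagemodel (default ide)
#  'nfs://172.16.0.1/vms=scsi,vmfs://ds1' -> first disk scsi on the NFS store, remaining disks on VMFS datastore 'ds1'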
2009-09-28 18:25:12 +00:00
#the number of devices is the larger of the number of specified sizes (TODO: masters) and the number of storage pools to span
my $ numdevs = ( scalar @ storelocs > scalar @ sizes ? scalar @ storelocs : scalar @ sizes ) ;
while ( $ numdevs - - > 0 ) {
my $ storeloc = shift @ storelocs ;
unless ( scalar @ storelocs ) { @ storelocs = ( $ storeloc ) ; } #allow reuse of one cfg specified pool for multiple devs
2009-09-28 17:59:43 +00:00
my $ disksize = shift @ sizes ;
unless ( scalar @ sizes ) { @ sizes = ( $ disksize ) ; } #if we emptied the array, stick the last entry back on to allow it to specify all remaining disks
$ disksize = getUnits ( $ disksize , 'G' , 1024 ) ;
2010-08-25 15:42:36 +00:00
$ disktype = $ globaldisktype ;
2010-03-19 17:19:43 +00:00
if ( $ storeloc =~ /=/ ) {
( $ storeloc , $ disktype ) = split /=/ , $ storeloc ;
}
2010-08-26 19:23:25 +00:00
if ( $ disktype eq 'ide' and $ args { idefull } ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "VM is at capacity for IDE devices, a drive was not added" ] , $ output_handler , $ node ) ;
return ;
} elsif ( $ disktype eq 'scsi' and $ args { scsifull } ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "SCSI Controller at capacity, a drive was not added" ] , $ output_handler , $ node ) ;
return ;
}
2009-09-28 18:25:12 +00:00
$ storeloc =~ s/\/$// ;
2010-02-01 14:11:02 +00:00
( my $ method , my $ location ) = split /:\/\// , $ storeloc , 2 ;
2010-09-20 19:06:42 +00:00
my $ uri = $ storeloc ;
unless ( $ sdmap - > { $ uri } ) { #don't call getURI if map works out fine already
$ uri = getURI ( $ method , $ location ) ;
}
2010-07-09 06:24:18 +00:00
#(my $server,my $path) = split/\//,$location,2;
#$server =~ s/:$//; #tolerate habitual colons
#my $servern = inet_aton($server);
#unless ($servern) {
2010-08-06 15:29:07 +00:00
# xCAT::SvrUtils::sendmsg([1,"could not resolve '$server' to an address from vm.storage"]);
2010-07-09 06:24:18 +00:00
# return;
#}
#$server = inet_ntoa($servern);
#my $uri = "nfs://$server/$path";
2009-06-22 16:00:28 +00:00
$ backingif = VirtualDiskFlatVer2BackingInfo - > new ( diskMode = > 'persistent' ,
2010-07-09 19:46:21 +00:00
thinProvisioned = > 1 ,
2010-01-26 15:44:00 +00:00
fileName = > "[" . $ sdmap - > { $ uri } . "]" ) ;
2010-08-26 19:23:25 +00:00
if ( $ disktype eq 'ide' and $ idecontrollerkey == 1 and $ ideunitnum == 0 ) { #reserve a spot for CD
2010-06-07 21:24:22 +00:00
$ ideunitnum = 1 ;
2010-08-26 19:23:25 +00:00
} elsif ( $ disktype eq 'ide' and $ ideunitnum == 2 ) { #go from current to next ide 'controller'
2009-06-22 16:00:28 +00:00
$ idecontrollerkey + + ;
2010-06-07 21:24:22 +00:00
$ ideunitnum = 0 ;
2009-06-22 16:00:28 +00:00
}
2010-03-19 17:19:43 +00:00
unless ( $ disktype eq 'ide' ) {
push @ { $ disktocont { $ scsicontrollerkey } } , $ currkey ;
}
2009-06-22 16:00:28 +00:00
my $ controllerkey ;
if ( $ disktype eq 'ide' ) {
$ controllerkey = $ idecontrollerkey ;
2010-06-17 21:10:39 +00:00
$ unitnum = 0 ;
while ( $ usedideunits { $ unitnum } ) {
$ unitnum + + ;
}
2011-05-10 14:27:50 +00:00
if ( $ unitnum == 2 ) {
$ idecontrollerkey + + ;
$ ideunitnum = 1 ;
$ unitnum = 1 ;
$ controllerkey = $ idecontrollerkey ;
}
2010-06-17 21:10:39 +00:00
$ usedideunits { $ unitnum } = 1 ;
2009-06-22 16:00:28 +00:00
} else {
$ controllerkey = $ scsicontrollerkey ;
2010-06-17 21:10:39 +00:00
$ unitnum = 0 ;
while ( $ usedscsiunits { $ unitnum } ) {
$ unitnum + + ;
}
$ usedscsiunits { $ unitnum } = 1 ;
2010-03-19 17:19:43 +00:00
$ havescsidevs = 1 ;
2009-06-22 16:00:28 +00:00
}
$ dev = VirtualDisk - > new ( backing = > $ backingif ,
2010-03-19 17:19:43 +00:00
controllerKey = > $ controllerkey ,
2009-06-22 16:00:28 +00:00
key = > $ currkey + + ,
2010-06-07 21:24:22 +00:00
unitNumber = > $ unitnum ,
2009-09-28 17:59:43 +00:00
capacityInKB = > $ disksize ) ;
2009-06-22 16:00:28 +00:00
push @ devs , VirtualDeviceConfigSpec - > new ( device = > $ dev ,
fileOperation = > VirtualDeviceConfigSpecFileOperation - > new ( 'create' ) ,
operation = > VirtualDeviceConfigSpecOperation - > new ( 'add' ) ) ;
}
2009-09-28 18:25:12 +00:00
2009-06-22 16:00:28 +00:00
#It *seems* that IDE controllers do not require explicit creation, so we skip it
2010-06-08 13:22:33 +00:00
if ( $ havescsidevs and not $ havescsicontroller ) { #need controllers to attach the disks to
foreach ( 0 .. $ scsicontrollerkey ) {
$ dev = VirtualLsiLogicController - > new ( key = > $ _ ,
device = > \ @ { $ disktocont { $ _ } } ,
sharedBus = > VirtualSCSISharing - > new ( 'noSharing' ) ,
busNumber = > $ _ ) ;
push @ devs , VirtualDeviceConfigSpec - > new ( device = > $ dev ,
operation = > VirtualDeviceConfigSpecOperation - > new ( 'add' ) ) ;
}
}
2009-06-22 16:00:28 +00:00
return @ devs ;
# my $ctlr = VirtualIDEController->new(
}
2009-10-01 14:35:41 +00:00
sub declare_ready {
my % args = % { shift ( ) } ;
$ hypready { $ args { hyp } } = 1 ;
}
2010-08-09 18:43:26 +00:00
sub populate_vcenter_hostviews {
my $ vcenter = shift ;
my @ hypervisors ;
my % nametohypmap ;
my $ iterations = 1 ;
if ( $ usehostnamesforvcenter and $ usehostnamesforvcenter !~ /no/i ) {
$ iterations = 2 ; #two passes possible
my $ hyp ;
foreach $ hyp ( keys % { $ vcenterhash { $ vcenter } - > { allhyps } } ) {
if ( $ tablecfg { hosts } - > { $ hyp } - > [ 0 ] - > { hostnames } ) {
$ nametohypmap { $ tablecfg { hosts } - > { $ hyp } - > [ 0 ] - > { hostnames } } = $ hyp ;
}
}
@ hypervisors = keys % nametohypmap ;
} else {
@ hypervisors = keys % { $ vcenterhash { $ vcenter } - > { allhyps } } ;
}
while ( $ iterations and scalar ( @ hypervisors ) ) {
my $ hosts = join ( ")|(" , @ hypervisors ) ;
$ hosts = '^((' . $ hosts . '))(\z|\.)' ;
2010-11-19 20:30:56 +00:00
my $ search = qr/$hosts/ ;
2011-02-25 23:02:08 +00:00
my @ hypviews = @ { $ vcenterhash { $ vcenter } - > { conn } - > find_entity_views ( view_type = > 'HostSystem' , properties = > [ 'summary.config.name' , 'summary.runtime.connectionState' , 'runtime.inMaintenanceMode' , 'parent' , 'configManager' , 'summary.host' ] , filter = > { 'summary.config.name' = > $ search } ) } ;
2010-08-09 18:43:26 +00:00
foreach ( @ hypviews ) {
my $ hypname = $ _ - > { 'summary.config.name' } ;
2011-02-25 23:02:08 +00:00
my $ hypv = $ _ ;
my $ hyp ;
2010-08-09 18:43:26 +00:00
if ( $ vcenterhash { $ vcenter } - > { allhyps } - > { $ hypname } ) { #simplest case, config.name is exactly the same as node name
$ vcenterhash { $ vcenter } - > { hostviews } - > { $ hypname } = $ _ ;
2011-02-25 23:02:08 +00:00
$ hyp = $ hypname ;
2010-08-09 18:43:26 +00:00
} elsif ( $ nametohypmap { $ hypname } ) { #second case, there is a name mapping this to a real name
$ vcenterhash { $ vcenter } - > { hostviews } - > { $ nametohypmap { $ hypname } } = $ _ ;
2011-02-25 23:02:08 +00:00
$ hyp = $ nametohypmap { $ hypname } ;
2010-08-09 18:43:26 +00:00
} else { #name as-is doesn't work, start stripping domain and hope for the best
$ hypname =~ s/\..*// ;
if ( $ vcenterhash { $ vcenter } - > { allhyps } - > { $ hypname } ) { #shortname is a node
$ vcenterhash { $ vcenter } - > { hostviews } - > { $ hypname } = $ _ ;
2011-02-25 23:02:08 +00:00
$ hyp = $ hypname ;
2010-08-09 18:43:26 +00:00
} elsif ( $ nametohypmap { $ hypname } ) { #alias for node
$ vcenterhash { $ vcenter } - > { hostviews } - > { $ nametohypmap { $ hypname } } = $ _ ;
2011-02-25 23:02:08 +00:00
$ hyp = $ nametohypmap { $ hypname } ;
2010-08-09 18:43:26 +00:00
}
}
2011-02-25 23:02:08 +00:00
foreach my $ nodename ( keys % { $ hyphash { $ hyp } - > { nodes } } ) {
$ hostrefbynode { $ nodename } = $ hypv - > { 'summary.host' } - > value ;
}
2010-08-09 18:43:26 +00:00
}
$ iterations - - ;
@ hypervisors = ( ) ;
if ( $ usehostnamesforvcenter and $ usehostnamesforvcenter !~ /no/i ) { #check for hypervisors by native node name if missed above
foreach my $ hyp ( keys % { $ vcenterhash { $ vcenter } - > { allhyps } } ) {
unless ( $ vcenterhash { $ vcenter } - > { hostviews } - > { $ hyp } ) {
push @ hypervisors , $ hyp ;
}
}
}
}
}
2009-06-22 16:00:28 +00:00
sub validate_vcenter_prereqs { #Communicate with vCenter and ensure this host is added correctly to a vCenter instance when an operation requires it
my $ hyp = shift ;
my $ depfun = shift ;
my $ depargs = shift ;
my $ vcenter = $ hyphash { $ hyp } - > { vcenter } - > { name } ;
unless ( $ hyphash { $ hyp } - > { vcenter } - > { conn } ) {
2010-01-14 20:32:51 +00:00
eval {
$ hyphash { $ hyp } - > { vcenter } - > { conn } = Vim - > new ( service_url = > "https://$vcenter/sdk" ) ;
$ hyphash { $ hyp } - > { vcenter } - > { conn } - > login ( user_name = > $ hyphash { $ hyp } - > { vcenter } - > { username } , password = > $ hyphash { $ hyp } - > { vcenter } - > { password } ) ;
} ;
if ( $@ ) {
$ hyphash { $ hyp } - > { vcenter } - > { conn } = undef ;
}
2009-06-22 16:00:28 +00:00
}
unless ( $ hyphash { $ hyp } - > { vcenter } - > { conn } ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Unable to reach vCenter server managing $hyp" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
return undef ;
}
my $ foundhyp ;
2009-12-09 21:05:35 +00:00
my $ name = $ hyp ;
if ( $ usehostnamesforvcenter and $ usehostnamesforvcenter !~ /no/i ) {
if ( $ tablecfg { hosts } - > { $ hyp } - > [ 0 ] - > { hostnames } ) {
$ name = $ tablecfg { hosts } - > { $ hyp } - > [ 0 ] - > { hostnames } ;
}
}
2009-06-22 16:00:28 +00:00
my $ connspec = HostConnectSpec - > new (
2009-12-09 21:05:35 +00:00
hostName = > $ name ,
2009-06-22 16:00:28 +00:00
password = > $ hyphash { $ hyp } - > { password } ,
userName = > $ hyphash { $ hyp } - > { username } ,
force = > 1 ,
) ;
2010-07-07 19:58:25 +00:00
my $ hview ;
2010-08-09 18:43:26 +00:00
$ hview = $ vcenterhash { $ vcenter } - > { hostviews } - > { $ hyp } ;
2010-07-07 19:58:25 +00:00
if ( $ hview ) {
if ( $ hview - > { 'summary.config.name' } =~ /^$hyp(?:\.|\z)/ or $ hview - > { 'summary.config.name' } =~ /^$name(?:\.|\z)/ ) { #Looks good, call the dependent function after declaring the state of vcenter to hypervisor as good
if ( $ hview - > { 'summary.runtime.connectionState' } - > val eq 'connected' ) {
2010-11-04 18:49:27 +00:00
if ($vcenterautojoin) { #only adjust vmotion settings when autojoin is enabled; if the admin requested manual vCenter management, leave them alone
2010-11-02 20:20:56 +00:00
enable_vmotion ( hypname = > $ hyp , hostview = > $ hview , conn = > $ hyphash { $ hyp } - > { vcenter } - > { conn } ) ;
}
2010-08-09 15:12:05 +00:00
$ vcenterhash { $ vcenter } - > { goodhyps } - > { $ hyp } = 1 ;
2009-06-22 16:00:28 +00:00
$ depfun - > ( $ depargs ) ;
2010-07-07 19:58:25 +00:00
if ( $ hview - > parent - > type eq 'ClusterComputeResource' ) { #if it is in a cluster, we can directly remove it
$ hyphash { $ hyp } - > { deletionref } = $ hview - > { mo_ref } ;
} elsif ( $ hview - > parent - > type eq 'ComputeResource' ) { #For some reason, we must delete the container instead
$ hyphash { $ hyp } - > { deletionref } = $ hview - > { parent } ; #save off a reference to delete hostview off just in case
2010-06-14 17:40:06 +00:00
}
2009-06-22 16:00:28 +00:00
return 1 ;
2011-03-09 15:20:00 +00:00
} elsif ( $ vcenterautojoin or $ vcenterforceremove ) { #if allowed autojoin and the current view seems corrupt, throw it away and rejoin
2009-10-01 20:54:11 +00:00
my $ ref_to_delete ;
2010-07-07 19:58:25 +00:00
if ( $ hview - > parent - > type eq 'ClusterComputeResource' ) { #We are allowed to specifically kill a host in a cluster
$ ref_to_delete = $ hview - > { mo_ref } ;
} elsif ( $ hview - > parent - > type eq 'ComputeResource' ) { #For some reason, we must delete the container instead
$ ref_to_delete = $ hview - > { parent } ;
2009-10-01 20:54:11 +00:00
}
my $ task = $ hyphash { $ hyp } - > { vcenter } - > { conn } - > get_view ( mo_ref = > $ ref_to_delete ) - > Destroy_Task ( ) ;
2009-06-22 16:00:28 +00:00
$ running_tasks { $ task } - > { task } = $ task ;
2011-03-09 15:20:00 +00:00
if ( $ vcenterautojoin ) {
$ running_tasks { $ task } - > { callback } = \ & addhosttovcenter ;
} elsif ( $ vcenterforceremove ) {
$ running_tasks { $ task } - > { callback } = \ & delhost_callback ;
}
2009-06-22 16:00:28 +00:00
$ running_tasks { $ task } - > { conn } = $ hyphash { $ hyp } - > { vcenter } - > { conn } ;
2010-07-07 19:58:25 +00:00
$ running_tasks { $ task } - > { data } = { depfun = > $ depfun , depargs = > $ depargs , conn = > $ hyphash { $ hyp } - > { vcenter } - > { conn } , connspec = > $ connspec , hostview = > $ hview , hypname = > $ hyp , vcenter = > $ vcenter } ;
2009-06-22 16:00:28 +00:00
return undef ;
#The rest would be shorter/ideal, but seems to be confused a lot by stateless
#Maybe in a future VMWare technology level the following would work better
#than it does today
2010-07-07 19:58:25 +00:00
# my $task = $hview_->ReconnectHost_Task(cnxSpec=>$connspec);
# my $task = $hview->DisconnectHost_Task();
2009-06-22 16:00:28 +00:00
# $running_tasks{$task}->{task} = $task;
# $running_tasks{$task}->{callback} = \&disconnecthost_callback;
# $running_tasks{$task}->{conn} = $hyphash{$hyp}->{vcenter}->{conn};
2010-07-07 19:58:25 +00:00
# $running_tasks{$task}->{data} = { depfun => $depfun, depargs => $depargs, conn=> $hyphash{$hyp}->{vcenter}->{conn}, connspec=>$connspec,hostview=>$hview,hypname=>$hyp,vcenter=>$vcenter };
2009-06-22 16:00:28 +00:00
#ADDHOST
2010-11-02 20:20:56 +00:00
} else {
2011-06-24 18:25:46 +00:00
if ( $ hyphash { $ hyp } - > { offline } ) {
xCAT::SvrUtils:: sendmsg ( ": Failed to communicate with $hyp, vCenter reports it as in inventory but not connected and xCAT is set to not autojoin" , $ output_handler ) ;
} else {
2010-11-02 20:20:56 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Failed to communicate with $hyp, vCenter reports it as in inventory but not connected and xCAT is set to not autojoin" ] , $ output_handler ) ;
2011-06-24 18:25:46 +00:00
}
2010-11-02 20:20:56 +00:00
$ hyphash { $ hyp } - > { conn } = undef ;
return "failed" ;
2009-06-22 16:00:28 +00:00
}
}
}
2010-11-04 18:49:27 +00:00
unless ( $ vcenterautojoin ) {
2011-06-24 18:25:46 +00:00
if ( $ hyphash { $ hyp } - > { offline } ) {
xCAT::SvrUtils:: sendmsg ( ": Failed to communicate with $hyp, vCenter does not have it in inventory and xCAT is set to not autojoin" , $ output_handler ) ;
} else {
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Failed to communicate with $hyp, vCenter does not have it in inventory and xCAT is set to not autojoin" ] , $ output_handler ) ;
}
2010-11-02 20:20:56 +00:00
$ hyphash { $ hyp } - > { conn } = undef ;
return "failed" ;
}
2009-06-22 16:00:28 +00:00
#If still in function, haven't found any likely host entries, make a new one
2010-06-14 17:40:06 +00:00
unless ( $ hyphash { $ hyp } - > { offline } ) {
eval {
$ hyphash { $ hyp } - > { conn } = Vim - > new ( service_url = > "https://$hyp/sdk" ) ; #Direct connect to install/check licenses
$ hyphash { $ hyp } - > { conn } - > login ( user_name = > $ hyphash { $ hyp } - > { username } , password = > $ hyphash { $ hyp } - > { password } ) ;
} ;
if ( $@ ) {
2010-09-17 21:10:58 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Failed to communicate with $hyp due to $@" ] , $ output_handler ) ;
2010-06-14 17:40:06 +00:00
$ hyphash { $ hyp } - > { conn } = undef ;
return "failed" ;
}
validate_licenses ( $ hyp ) ;
2010-01-14 20:32:51 +00:00
}
2009-06-22 16:00:28 +00:00
addhosttovcenter ( undef , {
depfun = > $ depfun ,
depargs = > $ depargs ,
conn = > $ hyphash { $ hyp } - > { vcenter } - > { conn } ,
connspec = > $ connspec ,
hypname = > $ hyp ,
vcenter = > $ vcenter ,
} ) ;
}
sub addhosttovcenter {
my $ task = shift ;
my $ args = shift ;
my $ hyp = $ args - > { hypname } ;
my $ depfun = $ args - > { depfun } ;
my $ depargs = $ args - > { depargs } ;
my $ connspec = $ args - > { connspec } ;
my $ vcenter = $ args - > { vcenter } ;
if ( $ task ) {
my $ state = $ task - > info - > state - > val ;
if ( $ state eq 'error' ) {
die ;
}
}
2010-06-14 17:40:06 +00:00
if ( $ hyphash { $ args - > { hypname } } - > { offline } ) { #let it stay offline
$ hypready { $ args - > { hypname } } = 1 ; #declare readiness
#enable_vmotion(hypname=>$args->{hypname},hostview=>$args->{hostview},conn=>$args->{conn});
2010-08-09 15:12:05 +00:00
$ vcenterhash { $ args - > { vcenter } } - > { goodhyps } - > { $ args - > { hypname } } = 1 ;
2010-06-14 17:40:06 +00:00
if ( defined $ args - > { depfun } ) { #If a function is waiting for the host connect to go valid, call it
$ args - > { depfun } - > ( $ args - > { depargs } ) ;
}
return ;
}
2009-10-01 14:35:41 +00:00
if ( $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { cluster } ) {
my $ cluster = get_clusterview ( clustname = > $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { cluster } , conn = > $ hyphash { $ hyp } - > { vcenter } - > { conn } ) ;
unless ( $ cluster ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { cluster } . " is not a known cluster to the vCenter server." ] , $ output_handler ) ;
2009-10-01 14:35:41 +00:00
$ hypready { $ hyp } = - 1 ; #Declare impossiblility to be ready
return ;
}
$ task = $ cluster - > AddHost_Task ( spec = > $ connspec , asConnected = > 1 ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & connecthost_callback ;
$ running_tasks { $ task } - > { conn } = $ hyphash { $ hyp } - > { vcenter } - > { conn } ;
$ running_tasks { $ task } - > { data } = { depfun = > $ depfun , depargs = > $ depargs , conn = > $ hyphash { $ hyp } - > { vcenter } - > { conn } , connspec = > $ connspec , cluster = > $ cluster , hypname = > $ hyp , vcenter = > $ vcenter } ;
} else {
my $ datacenter = validate_datacenter_prereqs ( $ hyp ) ;
2010-09-17 16:58:57 +00:00
unless ( $ datacenter ) { return ; }
2009-10-01 14:35:41 +00:00
my $ hfolder = $ datacenter - > hostFolder ; #$hyphash{$hyp}->{vcenter}->{conn}->find_entity_view(view_type=>'Datacenter',properties=>['hostFolder'])->hostFolder;
$ hfolder = $ hyphash { $ hyp } - > { vcenter } - > { conn } - > get_view ( mo_ref = > $ hfolder ) ;
$ task = $ hfolder - > AddStandaloneHost_Task ( spec = > $ connspec , addConnected = > 1 ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & connecthost_callback ;
$ running_tasks { $ task } - > { conn } = $ hyphash { $ hyp } - > { vcenter } - > { conn } ;
$ running_tasks { $ task } - > { data } = { depfun = > $ depfun , depargs = > $ depargs , conn = > $ hyphash { $ hyp } - > { vcenter } - > { conn } , connspec = > $ connspec , foldview = > $ hfolder , hypname = > $ hyp , vcenter = > $ vcenter } ;
}
2009-06-22 16:00:28 +00:00
#print Dumper @{$hyphash{$hyp}->{vcenter}->{conn}->find_entity_views(view_type=>'HostSystem',properties=>['runtime.connectionState'])};
}
2009-07-13 18:04:39 +00:00
sub validate_datacenter_prereqs {
my ( $ hyp ) = @ _ ;
2010-09-17 16:58:57 +00:00
my $ datacenter ;
if ( $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { datacenter } ) {
$ datacenter = $ hyphash { $ hyp } - > { vcenter } - > { conn } - > find_entity_view ( view_type = > 'Datacenter' , properties = > [ 'hostFolder' ] , filter = > { name = > $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { datacenter } } ) ;
unless ( $ datacenter ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Unable to find requested datacenter (hypervisor.datacenter for $hyp is " . $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { datacenter } . ")" ] , $ output_handler ) ;
return ;
}
} else {
$ datacenter = $ hyphash { $ hyp } - > { vcenter } - > { conn } - > find_entity_view ( view_type = > 'Datacenter' , properties = > [ 'hostFolder' ] ) ;
}
2009-07-13 18:04:39 +00:00
if ( ! defined $ datacenter ) {
my $ vconn = $ hyphash { $ hyp } - > { vcenter } - > { conn } ;
my $ root_folder = $ vconn - > get_view ( mo_ref = > $ vconn - > get_service_content ( ) - > rootFolder ) ;
$ root_folder - > CreateDatacenter ( name = > 'xcat-datacenter' ) ;
$ datacenter = $ hyphash { $ hyp } - > { vcenter } - > { conn } - > find_entity_view ( view_type = > 'Datacenter' , properties = > [ 'hostFolder' ] ) ;
}
return $ datacenter ;
}
2009-07-17 20:09:05 +00:00
sub get_default_switch_for_hypervisor {
#This makes sure the default, implicit switch is in order with respect to
#the configuration. If nothing is specified, it simply returns vSwitch0;
#if something is specified, make sure it exists;
#if it doesn't exist and the syntax explains how to build it, build it;
#return undef if something is specified, doesn't exist, and lacks build instructions.
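#Example (hypothetical nics): hypervisor.defaultnet of 'vSwitch1=vmnic2&vmnic3'
#selects vSwitch1 and, if it does not exist yet, builds it over vmnic2 and vmnic3.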
my $ hyp = shift ;
my $ defswitch = 'vSwitch0' ;
my $ switchmembers ;
if ( $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { defaultnet } ) {
$ defswitch = $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { defaultnet } ;
( $ defswitch , $ switchmembers ) = split /=/ , $ defswitch , 2 ;
my $ vswitch ;
my $ hostview = $ hyphash { $ hyp } - > { hostview } ;
foreach $ vswitch ( @ { $ hostview - > config - > network - > vswitch } ) {
if ( $ vswitch - > name eq $ defswitch ) {
return $ defswitch ;
}
}
#If still here, means we need to build the switch
unless ( $ switchmembers ) { return undef ; } #No hope, no idea how to make it
return create_vswitch ( $ hyp , $ defswitch , split ( /&/ , $ switchmembers ) ) ;
} else {
return 'vSwitch0' ;
}
}
2009-07-16 20:43:36 +00:00
sub get_switchname_for_portdesc {
#This function will examine all current switches to find or create a switch matching the described requirement
2009-07-14 20:43:59 +00:00
my $ hyp = shift ;
2009-07-16 20:43:36 +00:00
my $ portdesc = shift ;
my $ description ; #actual name to use for the virtual switch
if ( $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { netmap } ) {
foreach ( split /,/ , $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { netmap } ) {
if ( /^$portdesc=/ ) {
( $ description , $ portdesc ) = split /=/ , $ _ , 2 ;
last ;
}
}
} else {
$ description = 'vsw' . $ portdesc ;
}
unless ( $ description ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Invalid format for hypervisor.netmap detected for $hyp" ] , $ output_handler ) ;
2009-07-16 20:43:36 +00:00
return undef ;
}
my % requiredports ;
my % portkeys ;
foreach ( split /&/ , $ portdesc ) {
$ requiredports { $ _ } = 1 ;
}
2009-07-14 20:43:59 +00:00
my $ hostview = $ hyphash { $ hyp } - > { hostview } ;
unless ( $ hostview ) {
2010-09-17 21:10:58 +00:00
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hyphash { $ hyp } - > { conn } ) ; #,properties=>['config','configManager']); #clustered can't run here, hyphash conn reference good
2009-07-14 20:43:59 +00:00
$ hostview = $ hyphash { $ hyp } - > { hostview } ;
}
2009-07-16 20:43:36 +00:00
foreach ( @ { $ hostview - > config - > network - > pnic } ) {
if ( $ requiredports { $ _ - > device } ) { #We establish lookups both ways
$ portkeys { $ _ - > key } = $ _ - > device ;
delete $ requiredports { $ _ - > device } ;
}
}
if ( keys % requiredports ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ":Unable to locate the following nics on $hyp: " . join ( ',' , keys % requiredports ) ] , $ output_handler ) ;
2009-07-16 20:43:36 +00:00
return undef ;
}
my $ foundmatchswitch ;
my $ cfgmismatch = 0 ;
my $ vswitch ;
foreach $ vswitch ( @ { $ hostview - > config - > network - > vswitch } ) {
$ cfgmismatch = 0 ; #new switch, no sign of mismatch
foreach ( @ { $ vswitch - > pnic } ) {
if ( $ portkeys { $ _ } ) {
$ foundmatchswitch = $ vswitch - > name ;
delete $ requiredports { $ portkeys { $ _ } } ;
delete $ portkeys { $ _ } ;
} else {
$ cfgmismatch = 1 ; #If this turns out to have anything, it is bad
}
}
if ( $ foundmatchswitch ) { last ; }
}
if ( $ foundmatchswitch ) {
if ( $ cfgmismatch ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Aggregation mismatch detected, request nic is aggregated with a nic not requested" ] , $ output_handler ) ;
2009-07-16 20:43:36 +00:00
return undef ;
}
unless ( keys % portkeys ) {
return $ foundmatchswitch ;
}
die "TODO: add physical nics to aggregation if requested" ;
} else {
return create_vswitch ( $ hyp , $ description , values % portkeys ) ;
}
die "impossible occurance" ;
return undef ;
}
sub create_vswitch {
my $ hyp = shift ;
my $ description = shift ;
my @ ports = @ _ ;
my $ vswitch = HostVirtualSwitchBondBridge - > new (
nicDevice = > \ @ ports
) ;
my $ vswspec = HostVirtualSwitchSpec - > new (
bridge = > $ vswitch ,
2009-07-17 14:18:25 +00:00
mtu = > 1500 ,
2009-07-16 20:43:36 +00:00
numPorts = > 64
) ;
my $ hostview = $ hyphash { $ hyp } - > { hostview } ;
2010-09-17 21:10:58 +00:00
my $ netman = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ hostview - > configManager - > networkSystem ) ; #can't run in clustered mode, fine path..
2009-07-16 20:43:36 +00:00
$ netman - > AddVirtualSwitch (
vswitchName = > $ description ,
spec = > $ vswspec
) ;
2009-07-17 14:18:25 +00:00
return $ description ;
2009-07-14 20:43:59 +00:00
}
2009-06-22 16:00:28 +00:00
2010-09-20 19:06:42 +00:00
sub scan_cluster_networks {
my $ cluster = shift ;
#use Data::Dumper; #only needed for debugging
my $ conn = $ clusterhash { $ cluster } - > { conn } ;
my $ cview = get_clusterview ( clustname = > $ cluster , conn = > $ conn ) ;
if ( defined $ cview - > { network } ) {
foreach ( @ { $ cview - > network } ) {
my $ nvw = $ conn - > get_view ( mo_ref = > $ _ ) ;
if ( defined $ nvw - > name ) {
$ clusterhash { $ cluster } - > { nets } - > { $ nvw - > name } = $ _ ;
}
}
}
}
2009-06-22 16:00:28 +00:00
sub validate_network_prereqs {
my $ nodes = shift ;
my $ hyp = shift ;
2010-09-17 21:10:58 +00:00
my $ hypconn = $ hyphash { $ hyp } - > { conn } ; #this function can't work in clustered mode anyway, so this is appropriate.
2009-06-22 16:00:28 +00:00
my $ hostview = $ hyphash { $ hyp } - > { hostview } ;
if ( $ hostview ) {
$ hostview - > update_view_data ( ) ; #pull in changes induced by previous activity
} else {
2009-07-14 20:50:13 +00:00
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hyphash { $ hyp } - > { conn } ) ; #,properties=>['config','configManager','network']);
2009-06-22 16:00:28 +00:00
$ hostview = $ hyphash { $ hyp } - > { hostview } ;
}
my $ node ;
my $ method ;
my $ location ;
if ( defined $ hostview - > { network } ) {
foreach ( @ { $ hostview - > network } ) {
my $ nvw = $ hypconn - > get_view ( mo_ref = > $ _ ) ;
if ( defined $ nvw - > name ) {
$ hyphash { $ hyp } - > { nets } - > { $ nvw - > name } = $ _ ;
}
}
}
foreach $ node ( @$ nodes ) {
my @ networks = split /,/ , $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { nics } ;
foreach ( @ networks ) {
2009-07-17 20:09:05 +00:00
my $ switchname = get_default_switch_for_hypervisor ( $ hyp ) ;
2009-07-17 14:18:25 +00:00
my $ tabval = $ _ ;
2009-07-17 20:09:05 +00:00
my $ pgname ;
s/=.*// ; #TODO specify nic model with <blah>=model
if ( /:/ ) { #The config specifies a particular path in some way
2009-07-17 14:18:25 +00:00
s/(.*):// ;
2009-07-16 20:43:36 +00:00
$ switchname = get_switchname_for_portdesc ( $ hyp , $ 1 ) ;
2009-07-17 20:09:05 +00:00
$ pgname = $ switchname . "-" . $ _ ;
} else { #Use the default vswitch per table config to connect this through, use the same name we did before to maintain compatibility
$ pgname = $ _ ;
2009-07-14 20:43:59 +00:00
}
2009-06-22 16:00:28 +00:00
my $ netname = $ _ ;
my $ netsys ;
2009-07-17 14:18:25 +00:00
$ hyphash { $ hyp } - > { pgnames } - > { $ tabval } = $ pgname ;
2009-06-22 16:00:28 +00:00
my $ policy = HostNetworkPolicy - > new ( ) ;
2009-07-17 14:18:25 +00:00
unless ( $ hyphash { $ hyp } - > { nets } - > { $ pgname } ) {
2009-06-22 16:00:28 +00:00
my $ vlanid ;
if ( $ netname =~ /trunk/ ) {
$ vlanid = 4095 ;
} elsif ( $ netname =~ /vl(an)?(\d+)$/ ) {
$ vlanid = $ 2 ;
} else {
$ vlanid = 0 ;
}
my $ hostgroupdef = HostPortGroupSpec - > new (
2009-07-17 14:18:25 +00:00
name = > $ pgname ,
2009-06-22 16:00:28 +00:00
vlanId = > $ vlanid ,
policy = > $ policy ,
vswitchName = > $ switchname
) ;
unless ( $ netsys ) {
$ netsys = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ hostview - > configManager - > networkSystem ) ;
}
$ netsys - > AddPortGroup ( portgrp = > $ hostgroupdef ) ;
#$hyphash{$hyp}->{nets}->{$netname}=1;
2010-08-05 14:49:21 +00:00
while ( ( not defined $ hyphash { $ hyp } - > { nets } - > { $ pgname } ) and sleep 1 ) { #only sleep while the newly added port group has not yet shown up
$ hostview - > update_view_data ( ) ; #pull in changes induced by previous activity
if ( defined $ hostview - > { network } ) { #We load the new object references
foreach ( @ { $ hostview - > network } ) {
my $ nvw = $ hypconn - > get_view ( mo_ref = > $ _ ) ;
if ( defined $ nvw - > name ) {
$ hyphash { $ hyp } - > { nets } - > { $ nvw - > name } = $ _ ;
}
2009-06-22 16:00:28 +00:00
}
}
2010-08-05 14:49:21 +00:00
} #end while loop
2009-06-22 16:00:28 +00:00
}
}
}
return 1 ;
}
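# Illustrative vm.nics values handled by the loop above (names are hypothetical):
#   "vl5" or "vlan5"  -> port group with VLAN id 5 on the default vswitch
#   "trunk"           -> port group with VLAN id 4095 (all VLANs)
#   "<portdesc>:net1" -> the portdesc is resolved through get_switchname_for_portdesc()
#                        (hypervisor.netmap or a dedicated vswitch) and the port
#                        group is named "<vswitch>-net1"
# Missing port groups are created and the host view is polled until they appear.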
2010-09-17 21:10:58 +00:00
sub refreshclusterdatastoremap {
my $ cluster = shift ;
my $ conn = $ clusterhash { $ cluster } - > { conn } ;
my $ cview = get_clusterview ( clustname = > $ cluster , conn = > $ conn ) ;
if ( defined $ cview - > { datastore } ) {
foreach ( @ { $ cview - > datastore } ) {
my $ dsv = $ conn - > get_view ( mo_ref = > $ _ ) ;
if ( defined $ dsv - > info - > { nas } ) {
if ( $ dsv - > info - > nas - > type eq 'NFS' ) {
2010-09-20 17:26:24 +00:00
my $ mnthost = $ dsv - > info - > nas - > remoteHost ;
# my $mnthost = inet_aton($dsv->info->nas->remoteHost);
# if ($mnthost) {
# $mnthost = inet_ntoa($mnthost);
# } else {
# $mnthost = $dsv->info->nas->remoteHost;
# xCAT::SvrUtils::sendmsg([1,"Unable to resolve VMware specified host '".$dsv->info->nas->remoteHost."' to an address, problems may occur"], $output_handler);
# }
2010-09-17 21:10:58 +00:00
$ clusterhash { $ cluster } - > { datastoremap } - > { "nfs://" . $ mnthost . $ dsv - > info - > nas - > remotePath } = $ dsv - > info - > name ;
2011-06-13 17:30:48 +00:00
$ clusterhash { $ cluster } - > { datastoreurlmap } - > { $ dsv - > info - > name } = "nfs://" . $ mnthost . $ dsv - > info - > nas - > remotePath ; #save off a suitable URL if needed
2010-09-17 21:10:58 +00:00
$ clusterhash { $ cluster } - > { datastorerefmap } - > { "nfs://" . $ mnthost . $ dsv - > info - > nas - > remotePath } = $ _ ;
} #TODO: care about SMB
} elsif ( defined $ dsv - > info - > { vmfs } ) {
my $ name = $ dsv - > info - > vmfs - > name ;
$ clusterhash { $ cluster } - > { datastoremap } - > { "vmfs://" . $ name } = $ dsv - > info - > name ;
2011-06-13 17:30:48 +00:00
$ clusterhash { $ cluster } - > { datastoreurlmap } - > { $ dsv - > info - > name } = "vmfs://" . $ name ;
2011-10-05 18:46:49 +00:00
$ clusterhash { $ cluster } - > { datastorerefmap } - > { "vmfs://" . $ name } = $ _ ;
2010-09-17 21:10:58 +00:00
}
}
}
#That's about it for clusters: none of the automatic mounting done for individual hosts is attempted here.
#If you manage datastores cluster-style you are expected to set them up yourself; fixing up a whole cluster instead of chasing one host is slower and riskier, and vCenter does not do it for us.
}
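# Resulting lookups, with hypothetical example values:
#   datastoremap:    "nfs://10.0.0.5/export/vms" -> "<datastore label>"   (URL to vCenter name)
#   datastoreurlmap: "<datastore label>"         -> "nfs://10.0.0.5/export/vms"
#   datastorerefmap: "nfs://10.0.0.5/export/vms" -> managed object reference
# VMFS volumes get the same treatment keyed by "vmfs://<volume name>".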
2009-06-22 16:00:28 +00:00
sub validate_datastore_prereqs {
2011-08-17 18:06:34 +00:00
my $ hyp = $ _ [ 1 ] ;
lockbyname ( $ hyp . ".datastores" ) ;
$@ = "" ;
my $ rc ;
eval { $ rc = validate_datastore_prereqs_inlock ( @ _ ) ; } ;
unlockbyname ( $ hyp . ".datastores" ) ;
if ( $@ ) { die $@ ; }
return $ rc ;
}
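# The wrapper above serializes datastore validation per hypervisor: take a named
# lock ("<hyp>.datastores"), run the real work inside eval so the lock is always
# released, then rethrow any error.  The same pattern in miniature (do_work() is
# a hypothetical placeholder):
#   lockbyname("$hyp.something");
#   eval { do_work(); };
#   unlockbyname("$hyp.something");
#   die $@ if $@;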
sub validate_datastore_prereqs_inlock {
2009-06-22 16:00:28 +00:00
my $ nodes = shift ;
my $ hyp = shift ;
2010-06-08 18:14:04 +00:00
my $ newdatastores = shift ; # a hash reference of URLs to afflicted nodes outside of table space
2009-06-22 16:00:28 +00:00
my $ hypconn = $ hyphash { $ hyp } - > { conn } ;
my $ hostview = $ hyphash { $ hyp } - > { hostview } ;
unless ( $ hostview ) {
2009-07-17 14:18:25 +00:00
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hypconn ) ; #,properties=>['config','configManager']);
2009-06-22 16:00:28 +00:00
$ hostview = $ hyphash { $ hyp } - > { hostview } ;
}
my $ node ;
my $ method ;
my $ location ;
2010-07-09 06:24:18 +00:00
# get all of the datastores that are currently available on this hypervisor
# and put them into a hash
2009-06-22 16:00:28 +00:00
if ( defined $ hostview - > { datastore } ) { # only iterate if it exists
foreach ( @ { $ hostview - > datastore } ) {
my $ dsv = $ hypconn - > get_view ( mo_ref = > $ _ ) ;
if ( defined $ dsv - > info - > { nas } ) {
if ( $ dsv - > info - > nas - > type eq 'NFS' ) {
2010-01-26 15:44:00 +00:00
my $ mnthost = inet_aton ( $ dsv - > info - > nas - > remoteHost ) ;
if ( $ mnthost ) {
$ mnthost = inet_ntoa ( $ mnthost ) ;
} else {
$ mnthost = $ dsv - > info - > nas - > remoteHost ;
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to resolve VMware specified host '" . $ dsv - > info - > nas - > remoteHost . "' to an address, problems may occur" ] , $ output_handler ) ;
2010-01-26 15:44:00 +00:00
}
$ hyphash { $ hyp } - > { datastoremap } - > { "nfs://" . $ mnthost . $ dsv - > info - > nas - > remotePath } = $ dsv - > info - > name ;
2011-06-13 17:30:48 +00:00
$ hyphash { $ hyp } - > { datastoreurlmap } - > { $ dsv - > info - > name } = "nfs://" . $ mnthost . $ dsv - > info - > nas - > remotePath ;
2010-06-08 18:14:04 +00:00
$ hyphash { $ hyp } - > { datastorerefmap } - > { "nfs://" . $ mnthost . $ dsv - > info - > nas - > remotePath } = $ _ ;
2009-06-22 16:00:28 +00:00
} #TODO: care about SMB
2010-07-09 06:24:18 +00:00
} elsif ( defined $ dsv - > info - > { vmfs } ) {
my $ name = $ dsv - > info - > vmfs - > name ;
$ hyphash { $ hyp } - > { datastoremap } - > { "vmfs://" . $ name } = $ dsv - > info - > name ;
2011-06-13 17:30:48 +00:00
$ hyphash { $ hyp } - > { datastoreurlmap } - > { $ dsv - > info - > name } = "vmfs://" . $ name ;
2010-10-01 17:18:37 +00:00
$ hyphash { $ hyp } - > { datastorerefmap } - > { "vmfs://" . $ name } = $ _ ;
2010-07-09 06:24:18 +00:00
}
2009-06-22 16:00:28 +00:00
}
}
2010-01-07 18:20:29 +00:00
my $ refresh_names = 0 ;
2010-07-09 06:24:18 +00:00
# now go through the nodes and make sure that we have matching datastores.
# E.g.: if it's NFS, then mount it (if not already mounted)
# E.g.: if it's VMFS, then create it if not already created. Note: VMFS persists across
# machine reboots, unless it's destroyed by being overwritten.
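# Example vm.storage / vm.cfgstore values accepted below (hosts are hypothetical):
#   nfs://172.16.0.10/export/vms  -> NFS datastore, mounted on the hypervisor if absent
#   vmfs://localstore1            -> VMFS datastore, created on the first free disk if absent
# Trailing "=<type>" device hints are stripped; anything else is rejected.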
2009-06-22 16:00:28 +00:00
foreach $ node ( @$ nodes ) {
my @ storage = split /,/ , $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { storage } ;
2009-07-16 20:43:36 +00:00
if ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { cfgstore } ) {
push @ storage , $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { cfgstore } ;
}
2010-06-08 18:14:04 +00:00
foreach ( @ storage ) { #TODO: merge this with foreach loop below. Here we could build onto $newdatastores instead, for faster operation at scale
2010-03-19 17:19:43 +00:00
s/=.*// ; #remove device type information from configuration
2009-06-22 16:00:28 +00:00
s/\/$// ; #Strip trailing slash if specified, to align to VMware semantics
if ( /:\/\// ) {
( $ method , $ location ) = split /:\/\// , $ _ , 2 ;
2010-07-09 06:24:18 +00:00
if ( $ method =~ /nfs/ ) {
# go through and see if NFS is mounted, if not, then mount it.
( my $ server , my $ path ) = split /\// , $ location , 2 ;
$ server =~ s/:$// ; #remove a : if someone put it in out of nfs mount habit
my $ servern = inet_aton ( $ server ) ;
unless ( $ servern ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Unable to resolve '$server' to an address, check vm.cfgstore/vm.storage" ] , $ output_handler ) ;
2010-07-09 06:24:18 +00:00
return 0 ;
}
$ server = inet_ntoa ( $ servern ) ;
my $ uri = "nfs://$server/$path" ;
unless ( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } ) { #If not already there, must mount it
2011-08-17 14:13:42 +00:00
unless ( $ datastoreautomount ) {
2011-08-17 18:06:34 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": $uri is not currently accessible at the given location and automount is disabled in site table" ] , $ output_handler , $ node ) ;
2011-08-17 14:13:42 +00:00
return 0 ;
}
2010-07-09 06:24:18 +00:00
$ refresh_names = 1 ;
( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } , $ hyphash { $ hyp } - > { datastorerefmap } - > { $ uri } ) = mount_nfs_datastore ( $ hostview , $ location ) ;
2011-06-13 17:30:48 +00:00
$ hyphash { $ hyp } - > { datastoreurlmap } - > { $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } } = $ uri ;
2010-07-09 06:24:18 +00:00
}
} elsif ( $ method =~ /vmfs/ ) {
( my $ name , undef ) = split /\// , $ location , 2 ;
$ name =~ s/:$// ; #remove a : if someone put it in for some reason.
my $ uri = "vmfs://$name" ;
# check and see if this vmfs is on the node.
unless ( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } ) { #If not already there, try creating it.
2011-09-29 20:34:06 +00:00
unless ( $ datastoreautomount ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , ": $uri is not currently accessible at the given location and automount is disabled in site table" ] , $ output_handler , $ node ) ;
return 0 ;
}
2010-10-01 17:18:37 +00:00
$ refresh_names = 1 ;
( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } , $ hyphash { $ hyp } - > { datastorerefmap } - > { $ uri } ) = create_vmfs_datastore ( $ hostview , $ name , $ hyp ) ;
unless ( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } ) { return 0 ; }
2011-06-13 17:30:48 +00:00
$ hyphash { $ hyp } - > { datastoreurlmap } - > { $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } } = $ uri ;
2010-07-09 06:24:18 +00:00
}
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": $method is unsupported at this time (nfs would be)" ] , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
return 0 ;
}
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": $_ not supported storage specification for ESX plugin,\n\t'nfs://<server>/<path>'\n\t\tor\n\t'vmfs://<vmfs>'\n only currently supported vm.storage supported for ESX at the moment" ] , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
return 0 ;
} #TODO: raw device mapping, VMFS via iSCSI, VMFS via FC?
}
}
2010-07-09 06:24:18 +00:00
# newdatastores are for migrations or changing vms.
# Both NFS and VMFS URLs are handled below.
2010-06-08 18:14:04 +00:00
if ( ref $ newdatastores ) {
foreach ( keys %$ newdatastores ) {
2010-08-31 20:53:55 +00:00
my $ origurl = $ _ ;
2010-06-08 18:14:04 +00:00
s/\/$// ; #Strip trailing slash if specified, to align to VMware semantics
if ( /:\/\// ) {
( $ method , $ location ) = split /:\/\// , $ _ , 2 ;
2010-10-01 17:18:37 +00:00
if ( $ method =~ /nfs/ ) {
( my $ server , my $ path ) = split /\// , $ location , 2 ;
$ server =~ s/:$// ; #remove a : if someone put it in out of nfs mount habit
my $ servern = inet_aton ( $ server ) ;
unless ( $ servern ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Unable to resolve '$server' to an address, check vm.cfgstore/vm.storage" ] , $ output_handler ) ;
return 0 ;
2010-06-08 18:14:04 +00:00
}
2010-10-01 17:18:37 +00:00
$ server = inet_ntoa ( $ servern ) ;
my $ uri = "nfs://$server/$path" ;
unless ( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } ) { #If not already there, must mount it
2011-08-17 14:13:42 +00:00
unless ( $ datastoreautomount ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , ":) $uri is not currently accessible at the given location and automount is disabled in site table" ] , $ output_handler , $ node ) ;
return 0 ;
}
2010-10-01 17:18:37 +00:00
$ refresh_names = 1 ;
( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } , $ hyphash { $ hyp } - > { datastorerefmap } - > { $ uri } ) = mount_nfs_datastore ( $ hostview , $ location ) ;
}
2011-06-13 17:30:48 +00:00
$ hyphash { $ hyp } - > { datastoreurlmap } - > { $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } } = $ uri ;
$ hyphash { $ hyp } - > { datastoremap } - > { $ origurl } = $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } ; #we track both the uri xCAT expected and the one vCenter actually ended up with
2010-10-01 17:18:37 +00:00
$ hyphash { $ hyp } - > { datastorerefmap } - > { $ origurl } = $ hyphash { $ hyp } - > { datastorerefmap } - > { $ uri } ;
} elsif ( $ method =~ /vmfs/ ) {
( my $ name , undef ) = split /\// , $ location , 2 ;
$ name =~ s/:$// ; #remove a : if someone put it in for some reason.
my $ uri = "vmfs://$name" ;
unless ( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } ) { #If not already there, it should be!
2011-09-29 20:34:06 +00:00
unless ( $ datastoreautomount ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , ": $uri is not currently accessible at the given location and automount is disabled in site table" ] , $ output_handler , $ node ) ;
return 0 ;
}
2010-10-01 17:18:37 +00:00
$ refresh_names = 1 ;
( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } , $ hyphash { $ hyp } - > { datastorerefmap } - > { $ uri } ) = create_vmfs_datastore ( $ hostview , $ name , $ hyp ) ;
unless ( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } ) { return 0 ; }
}
2011-06-13 17:30:48 +00:00
$ hyphash { $ hyp } - > { datastoreurlmap } - > { $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } } = $ uri ;
2010-10-01 17:18:37 +00:00
$ hyphash { $ hyp } - > { datastoremap } - > { $ origurl } = $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } ;
$ hyphash { $ hyp } - > { datastorerefmap } - > { $ origurl } = $ hyphash { $ hyp } - > { datastorerefmap } - > { $ uri } ;
} else {
print "$method: not NFS and not VMFS here!\n" ;
2010-06-08 18:14:04 +00:00
}
} else {
2010-08-31 20:53:55 +00:00
my $ datastore = $ _ ;
2010-10-01 17:18:37 +00:00
foreach my $ ds ( @ { $ newdatastores - > { $ _ } } ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , ": $datastore not supported storage specification for ESX plugin, 'nfs://<server>/<path>' only currently supported vm.storage supported for ESX at the moment" ] , $ output_handler , $ ds ) ;
2010-06-08 18:14:04 +00:00
}
return 0 ;
2010-10-01 17:18:37 +00:00
} #TODO: raw device mapping, VMFS via iSCSI, VMFS via FC, VMFS on same local drive?
2010-06-08 18:14:04 +00:00
}
}
2010-01-07 18:20:29 +00:00
if ( $ refresh_names ) { #if we are in a vcenter context, vmware can rename a datastore behind our backs immediately after adding
$ hostview - > update_view_data ( ) ;
if ( defined $ hostview - > { datastore } ) { # only iterate if it exists
foreach ( @ { $ hostview - > datastore } ) {
my $ dsv = $ hypconn - > get_view ( mo_ref = > $ _ ) ;
if ( defined $ dsv - > info - > { nas } ) {
if ( $ dsv - > info - > nas - > type eq 'NFS' ) {
2010-01-26 15:44:00 +00:00
my $ mnthost = inet_aton ( $ dsv - > info - > nas - > remoteHost ) ;
if ( $ mnthost ) {
$ mnthost = inet_ntoa ( $ mnthost ) ;
} else {
$ mnthost = $ dsv - > info - > nas - > remoteHost ;
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to resolve VMware specified host '" . $ dsv - > info - > nas - > remoteHost . "' to an address, problems may occur" ] , $ output_handler ) ;
2010-01-26 15:44:00 +00:00
}
$ hyphash { $ hyp } - > { datastoremap } - > { "nfs://" . $ mnthost . $ dsv - > info - > nas - > remotePath } = $ dsv - > info - > name ;
2011-06-13 17:30:48 +00:00
$ hyphash { $ hyp } - > { datastoreurlmap } - > { $ dsv - > info - > name } = "nfs://" . $ mnthost . $ dsv - > info - > nas - > remotePath ;
2010-06-08 18:14:04 +00:00
$ hyphash { $ hyp } - > { datastorerefmap } - > { "nfs://" . $ mnthost . $ dsv - > info - > nas - > remotePath } = $ _ ;
2010-01-07 18:20:29 +00:00
} #TODO: care about SMB
} #TODO: care about VMFS
}
}
}
2009-06-22 16:00:28 +00:00
return 1 ;
}
2009-07-13 18:04:39 +00:00
sub getlabel_for_datastore {
my $ method = shift ;
2009-06-22 16:00:28 +00:00
my $ location = shift ;
2009-07-13 18:04:39 +00:00
2009-06-22 16:00:28 +00:00
$ location =~ s/\//_/g ;
2009-07-13 18:04:39 +00:00
$ location = $ method . '_' . $ location ;
2009-07-10 18:55:58 +00:00
#VMware has a 42 character limit, we will start mangling to get under 42.
#Will try to preserve as much informative detail as possible, hence several conditionals instead of taking the easy way out
if ( length ( $ location ) > 42 ) {
$ location =~ s/nfs_// ; #Ditch unique names for different protocols to the same path, seems unbelievably unlikely
}
if ( length ( $ location ) > 42 ) {
$ location =~ s/\.//g ; #Next, ditch host delimiter, it is unlikely that hosts will have unique names if their dots are removed
}
if ( length ( $ location ) > 42 ) {
$ location =~ s/_//g ; #Next, ditch path delimiter, it is unlikely that two paths will happen to look the same without delimiters
}
if ( length ( $ location ) > 42 ) { #finally, replace the middle with ellipsis
substr ( $ location , 20 , - 20 , '..' ) ;
}
2009-07-13 18:04:39 +00:00
return $ location ;
}
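# Illustrative example of the mangling above (export path is hypothetical):
#   getlabel_for_datastore('nfs', '172.16.0.10/a/rather/long/export/path/for/vms')
# starts as "nfs_172.16.0.10_a_rather_long_export_path_for_vms"; because that
# exceeds 42 characters the code progressively drops the protocol prefix, the
# dots, and the underscores, and as a last resort replaces the middle with ".."
# so the label fits VMware's 42-character datastore name limit.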
sub mount_nfs_datastore {
my $ hostview = shift ;
my $ location = shift ;
my $ server ;
my $ path ;
2011-08-17 14:13:42 +00:00
unless ( $ datastoreautomount ) {
die "automount of VMware datastores is disabled in site configuration, not continuing" ;
}
2009-07-13 18:04:39 +00:00
( $ server , $ path ) = split /\// , $ location , 2 ;
$ location = getlabel_for_datastore ( 'nfs' , $ location ) ;
2009-07-10 18:55:58 +00:00
2009-06-22 16:00:28 +00:00
my $ nds = HostNasVolumeSpec - > new ( accessMode = > 'readWrite' ,
remoteHost = > $ server ,
localPath = > $ location ,
remotePath = > "/" . $ path ) ;
my $ dsmv = $ hostview - > { vim } - > get_view ( mo_ref = > $ hostview - > configManager - > datastoreSystem ) ;
2009-11-23 23:11:16 +00:00
2010-06-08 18:14:04 +00:00
my $ dsref ;
2009-11-23 23:11:16 +00:00
eval {
2010-06-08 18:14:04 +00:00
$ dsref = $ dsmv - > CreateNasDatastore ( spec = > $ nds ) ;
2009-11-23 23:11:16 +00:00
} ;
if ( $@ ) {
die "$@" unless $@ =~ m/Fault detail: DuplicateNameFault/ ;
die "esx plugin: a datastore was discovered with the same name referring to a different nominatum- cannot continue\n$@"
unless & match_nfs_datastore ( $ server , "/$path" , $ hostview - > { vim } ) ;
}
2010-06-08 18:14:04 +00:00
return ( $ location , $ dsref ) ;
2009-06-22 16:00:28 +00:00
}
2010-07-09 06:24:18 +00:00
# create a VMFS data store on a node so that VMs can live locally instead of NFS
sub create_vmfs_datastore {
my $ hostview = shift ; # VM object
my $ name = shift ; # name of storage we wish to create.
2010-10-01 17:18:37 +00:00
my $ hyp = shift ;
2011-09-29 20:34:06 +00:00
unless ( $ datastoreautomount ) {
die "automount of VMware datastores is disabled in site configuration, not continuing" ;
}
2010-07-09 06:24:18 +00:00
# call some VMware API here to create
my $ hdss = $ hostview - > { vim } - > get_view ( mo_ref = > $ hostview - > configManager - > datastoreSystem ) ;
2010-10-01 17:18:37 +00:00
2010-07-09 06:24:18 +00:00
my $ diskList = $ hdss - > QueryAvailableDisksForVmfs ( ) ;
my $ count = scalar ( @$ diskList ) ; # get the number of disks available for formatting.
unless ( $ count > 0 ) {
2010-10-01 17:18:37 +00:00
#die "No disks are available to create VMFS volume for $name";
$ output_handler - > ( { error = > [ "No disks are available on $hyp to create VMFS volume for $name" ] , errorcode = > 1 } ) ;
return 0 ;
2010-07-09 06:24:18 +00:00
}
foreach my $ disk ( @$ diskList ) {
my $ options = $ hdss - > QueryVmfsDatastoreCreateOptions ( devicePath = > $ disk - > devicePath ) ;
@$ options [ 0 ] - > spec - > vmfs - > volumeName ( $ name ) ;
my $ newDatastore = $ hdss - > CreateVmfsDatastore ( spec = > @$ options [ 0 ] - > spec ) ;
#return $newDatastore;
# create it on the first disk we see.
return ( $ name , $ newDatastore ) ;
}
return 0 ;
}
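# Note on create_vmfs_datastore() above: QueryAvailableDisksForVmfs() lists the
# disks the host considers usable and the loop formats the first one reported,
# so disk selection is simply "first available".  Hypothetical usage, mirroring
# the callers in validate_datastore_prereqs_inlock():
#   my ($label, $dsref) = create_vmfs_datastore($hostview, 'localstore1', $hyp);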
2009-06-22 16:00:28 +00:00
sub build_more_info {
die ( "TODO: fix this function if called" ) ;
print "Does this acually get called????**********************************\n" ;
my $ noderange = shift ;
my $ callback = shift ;
my $ vmtab = xCAT::Table - > new ( "vm" ) ;
my @ moreinfo = ( ) ;
unless ( $ vmtab ) {
$ callback - > ( { data = > [ "Cannot open mp table" ] } ) ;
return @ moreinfo ;
}
my % mpa_hash = ( ) ;
foreach my $ node ( @$ noderange ) {
my $ ent = $ vmtab - > getNodeAttribs ( $ node , [ 'mpa' , 'id' ] ) ;
if ( defined ( $ ent - > { mpa } ) ) { push @ { $ mpa_hash { $ ent - > { mpa } } { nodes } } , $ node ; }
else {
$ callback - > ( { data = > [ "no mpa defined for node $node" ] } ) ;
2010-09-17 14:36:27 +00:00
return @ moreinfo ;
2009-06-22 16:00:28 +00:00
}
if ( defined ( $ ent - > { id } ) ) { push @ { $ mpa_hash { $ ent - > { mpa } } { ids } } , $ ent - > { id } ; }
else { push @ { $ mpa_hash { $ ent - > { mpa } } { ids } } , "" ; }
}
foreach ( keys % mpa_hash ) {
push @ moreinfo , "\[$_\]\[" . join ( ',' , @ { $ mpa_hash { $ _ } { nodes } } ) . "\]\[" . join ( ',' , @ { $ mpa_hash { $ _ } { ids } } ) . "\]" ;
}
return \ @ moreinfo ;
}
sub copycd {
my $ request = shift ;
my $ doreq = shift ;
my $ distname = "" ;
my $ path ;
my $ arch ;
my $ darch ;
my $ installroot ;
$ installroot = "/install" ;
2012-05-22 09:00:37 +00:00
#my $sitetab = xCAT::Table->new('site');
#if($sitetab){
#(my $ref) = $sitetab->getAttribs({key => 'installdir'}, 'value');
my @ entries = xCAT::Utils - > get_site_attribute ( "installdir" ) ;
my $ t_entry = $ entries [ 0 ] ;
if ( defined ( $ t_entry ) ) {
$ installroot = $ t_entry ;
2009-06-22 16:00:28 +00:00
}
2012-05-22 09:00:37 +00:00
#}
2009-06-22 16:00:28 +00:00
@ ARGV = @ { $ request - > { arg } } ;
GetOptions (
'n=s' = > \ $ distname ,
'a=s' = > \ $ arch ,
2012-06-12 06:58:09 +00:00
'm=s' = > \ $ path
2009-06-22 16:00:28 +00:00
) ;
# run a few tests to see if the copycds should use this plugin
unless ( $ path ) {
# can't use us cause we need a path and you didn't provide one!
return ;
}
if ( $ distname and $ distname !~ /^esx/ ) {
# we're for esx, so if you didn't specify that, it's not us!
return ;
}
my $ found = 0 ;
if ( - r $ path . "/README" and - r $ path . "/build_number" and - d $ path . "/VMware" and - r $ path . "/packages.xml" ) { #We have a probable new style ESX media
open ( LINE , $ path . "/packages.xml" ) ;
my $ product ;
my $ version ;
while ( <LINE> ) {
if ( /roductLineId>([^<]*)<\/Prod/ ) {
$ product = $ 1 ;
}
if ( /ersion>([^<]*)<\/version/ ) {
$ version = $ 1 ;
$ version =~ s/\.0$// ;
}
if ( /arch>([^>]*)<\/arch/ ) {
unless ( $ darch and $ darch =~ /x86_64/ ) { #prefer to be characterized as x86_64
$ darch = $ 1 ;
$ arch = $ 1 ;
}
}
}
close ( LINE ) ;
if ( $ product and $ version ) {
$ distname = $ product . $ version ;
$ found = 1 ;
}
} elsif ( - r $ path . "/README" and - r $ path . "/open_source_licenses.txt" and - d $ path . "/VMware" ) { #Candidate to be ESX 3.5
open ( LINE , $ path . "/README" ) ;
while ( <LINE> ) {
if ( /VMware ESX Server 3.5\s*$/ ) {
$ darch = 'x86' ;
$ arch = 'x86' ;
$ distname = 'esx3.5' ;
$ found = 1 ;
last ;
}
}
close ( LINE ) ;
} elsif ( - r $ path . "/README.txt" and - r $ path . "/vmkernel.gz" ) {
# its an esxi dvd!
# if we got here its probably ESX they want to copy
my $ line ;
my $ darch ;
open ( LINE , $ path . "/README.txt" ) or die "couldn't open!" ;
while ( $ line = <LINE> ) {
chomp ( $ line ) ;
2010-06-15 15:10:45 +00:00
if ( $ line =~ /VMware ESXi(?: version)? 4\.(\d+)/ ) {
2009-09-28 21:26:01 +00:00
$ darch = "x86_64" ;
2009-06-22 16:00:28 +00:00
$ distname = "esxi4" ;
2010-05-10 13:54:48 +00:00
if ( $ 1 ) {
$ distname . = '.' . $ 1 ;
}
2009-06-22 16:00:28 +00:00
$ found = 1 ;
if ( $ arch and $ arch ne $ darch ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Requested distribution architecture $arch, but media is $darch" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
return ;
}
$ arch = $ darch ;
last ; # we found our distro! end this loop madness.
}
}
close ( LINE ) ;
unless ( $ found ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "I don't recognize this VMware ESX DVD" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
return ; # doesn't seem to be a valid DVD or CD
}
} elsif ( - r $ path . "/vmkernel.gz" and - r $ path . "/isolinux.cfg" ) {
open ( LINE , $ path . "/isolinux.cfg" ) ;
while ( <LINE> ) {
if ( /ThinESX Installer/ ) {
$ darch = 'x86' ;
$ arch = 'x86' ;
$ distname = 'esxi3.5' ;
$ found = 1 ;
last ;
}
}
close ( LINE ) ;
2011-04-19 14:23:35 +00:00
} elsif ( - r $ path . "/vmware-esx-base-readme" ) {
open ( LINE , $ path . "/vmware-esx-base-readme" ) ;
while ( <LINE> ) {
if ( /VMware ESXi 5\.0/ ) {
$ darch = "x86_64" ;
$ arch = "x86_64" ;
$ distname = 'esxi5' ;
$ found = 1 ;
last ;
}
2012-03-05 21:50:43 +00:00
if ( /VMware ESXi 5\.1/ ) {
$ darch = "x86_64" ;
$ arch = "x86_64" ;
$ distname = 'esxi5.1' ;
$ found = 1 ;
last ;
}
2011-04-19 14:23:35 +00:00
}
}
2009-06-22 16:00:28 +00:00
unless ( $ found ) { return ; } #not our media
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( "Copying media to $installroot/$distname/$arch/" , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
my $ omask = umask 0022 ;
mkpath ( "$installroot/$distname/$arch" ) ;
umask $ omask ;
my $ rc ;
my $ reaped = 0 ;
$ SIG { INT } = $ SIG { TERM } = sub {
foreach ( @ cpiopid ) {
kill 2 , $ _ ;
}
if ( $ ::CDMOUNTPATH ) {
chdir ( "/" ) ;
system ( "umount $::CDMOUNTPATH" ) ;
}
} ;
my $ KID ;
chdir $ path ;
my $ numFiles = `find . -print | wc -l` ;
my $ child = open ( $ KID , "|-" ) ;
unless ( defined $ child )
{
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Media copy operation fork failure" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
return ;
}
if ( $ child )
{
push @ cpiopid , $ child ;
my @ finddata = `find .` ;
for ( @ finddata )
{
print $ KID $ _ ;
}
close ( $ KID ) ;
$ rc = $? ;
}
else
{
nice 10 ;
my $ c = "nice -n 20 cpio -vdump $installroot/$distname/$arch" ;
my $ k2 = open ( PIPE , "$c 2>&1 |" ) ||
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Media copy operation fork failure" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
push @ cpiopid , $ k2 ;
my $ copied = 0 ;
my ( $ percent , $ fout ) ;
while ( <PIPE> ) {
next if /^cpio:/ ;
$ percent = $ copied / $ numFiles ;
$ fout = sprintf "%0.2f%%" , $ percent * 100 ;
$ output_handler - > ( { sinfo = > "$fout" } ) ;
+ + $ copied ;
}
exit ;
}
# let everyone read it
#chdir "/tmp";
chmod 0755 , "$installroot/$distname/$arch" ;
2011-04-22 13:49:14 +00:00
if ( $ distname =~ /esxi5/ ) { #going to tweak boot.cfg for install and default stateless case
if ( ! - r "$installroot/$distname/$arch/boot.cfg.stateless" ) {
copy ( "$installroot/$distname/$arch/boot.cfg" , "$installroot/$distname/$arch/boot.cfg.stateless" ) ;
my $ bootcfg ;
open ( $ bootcfg , "<" , "$installroot/$distname/$arch/boot.cfg" ) ;
my @ bootcfg = <$bootcfg> ;
close ( $ bootcfg ) ;
2011-04-25 17:53:31 +00:00
foreach ( @ bootcfg ) { #no point in optimizing trivial, infrequent code, readable this way
s!kernel=/!kernel=! ; # remove leading /
s!modules=/!modules=! ; #remove leading /
s!--- /!--- !g ; #remove all the 'absolute' slashes
}
open ( $ bootcfg , ">" , "$installroot/$distname/$arch/boot.cfg.install" ) ;
foreach ( @ bootcfg ) {
2012-06-22 14:52:17 +00:00
if ( /^modules=/ and $ _ !~ /xcatmod.tgz/ and not $ ::XCATSITEVALS { xcatesximoddisable } ) {
2011-10-10 18:15:12 +00:00
chomp ( ) ;
s! *\z! --- xcatmod.tgz\n! ;
}
2011-04-25 17:53:31 +00:00
print $ bootcfg $ _ ;
}
close ( $ bootcfg ) ;
2011-04-22 13:49:14 +00:00
foreach ( @ bootcfg ) { #no point in optimizing trivial, infrequent code, readable this way
s/runweasel// ; #don't run the installer in stateless mode
2011-04-25 18:00:19 +00:00
s!--- imgdb.tgz!! ; #don't need the imgdb for stateless
s!--- imgpayld.tgz!! ; #don't need the boot payload since we aren't installing
s!--- tools.t00!! ; #tools could be useful, but for now skip the memory requirement
s!--- weaselin.i00!! ; #and also don't need the weasel install images if... not installing
2011-04-25 17:53:31 +00:00
2012-06-22 14:52:17 +00:00
if ( /^modules=/ and $ _ !~ /xcatmod.tgz/ and not $ ::XCATSITEVALS { xcatesximoddisable } ) {
2011-04-22 17:38:22 +00:00
chomp ( ) ;
2011-04-25 17:56:02 +00:00
s! *\z! --- xcatmod.tgz\n! ;
2011-04-22 15:03:25 +00:00
}
2011-04-22 13:49:14 +00:00
s!Loading ESXi installer!xCAT is loading ESXi stateless! ;
}
open ( $ bootcfg , ">" , "$installroot/$distname/$arch/boot.cfg.stateless" ) ;
foreach ( @ bootcfg ) {
print $ bootcfg $ _ ;
}
close ( $ bootcfg ) ;
2011-10-03 15:54:47 +00:00
if ( grep /LSIProvi.v00/ , @ bootcfg and ! - r "$installroot/$distname/$arch/LSIProvi.v00" and - r "$installroot/$distname/$arch/lsiprovi.v00" ) { #there is media with LSIProv.v00 expected, but the install media was mal-constructed, fix it
move ( "$installroot/$distname/$arch/lsiprovi.v00" , "$installroot/$distname/$arch/LSIProvi.v00" ) ;
}
2011-04-22 17:38:22 +00:00
}
2011-04-22 13:49:14 +00:00
}
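# Illustrative before/after for the boot.cfg rewriting above (lines are
# representative of ESXi 5 media, not copied from a specific build):
#   kernel=/b.b00                    -> kernel=b.b00
#   modules=/b.b00 --- /useropts.gz  -> modules=b.b00 --- useropts.gz --- xcatmod.tgz
# boot.cfg.install keeps the installer payloads and the runweasel option, while
# boot.cfg.stateless drops runweasel, imgdb.tgz, imgpayld.tgz, tools.t00 and
# weaselin.i00 and rebrands the loader banner for stateless boot.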
2009-06-22 16:00:28 +00:00
if ( $ rc != 0 ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Media copy operation failed, status $rc" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( "Media copy operation successful" , $ output_handler ) ;
2009-09-19 17:03:14 +00:00
my @ ret = xCAT::SvrUtils - > update_tables_with_templates ( $ distname , $ arch ) ;
if ( $ ret [ 0 ] != 0 ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( "Error when updating the osimage tables: " . $ ret [ 1 ] , $ output_handler ) ;
2009-09-19 17:03:14 +00:00
}
2009-06-22 16:00:28 +00:00
}
}
sub makecustomizedmod {
my $ osver = shift ;
my $ dest = shift ;
2012-06-22 14:52:17 +00:00
if ( $ ::XCATSITEVALS { xcatesximoddisable } ) { return 1 ; }
2011-04-22 18:08:16 +00:00
my $ modname ;
if ( $ osver =~ /esxi4/ ) { #want more descriptive name,but don't break esxi4 setups.
$ modname = "mod.tgz" ;
} else {
$ modname = "xcatmod.tgz" ;
}
2010-08-12 22:31:31 +00:00
# if it already exists, do not overwrite it because it may be someone
# else's custom image
2011-04-22 18:08:16 +00:00
if ( - f "$dest/$modname" ) { return 1 ; }
2009-06-22 16:00:28 +00:00
my $ passtab = xCAT::Table - > new ( 'passwd' ) ;
my $ tmp ;
my $ password ;
if ( $ passtab ) {
( $ tmp ) = $ passtab - > getAttribs ( { 'key' = > 'vmware' } , 'username' , 'password' ) ;
if ( defined ( $ tmp ) ) {
$ password = $ tmp - > { password } ;
}
}
2010-01-07 20:03:15 +00:00
unless ( $ password ) {
2012-05-31 17:39:27 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Unable to find a password entry for esxi in passwd table" ] , $ output_handler ) ;
2010-01-07 20:03:15 +00:00
return 0 ;
}
mkpath ( "/tmp/xcat" ) ;
my $ tempdir = tempdir ( "/tmp/xcat/esxmodcustXXXXXXXX" ) ;
my $ shadow ;
mkpath ( $ tempdir . "/etc/" ) ;
2010-09-16 13:41:16 +00:00
my $ oldmask = umask ( 0077 ) ;
2010-01-07 20:03:15 +00:00
open ( $ shadow , ">" , $ tempdir . "/etc/shadow" ) ;
2009-06-22 16:00:28 +00:00
$ password = crypt ( $ password , '$1$' . xCAT::Utils:: genpassword ( 8 ) ) ;
my $ dayssince1970 = int ( time ( ) / 86400 ) ; #Be truthful about /etc/shadow
2011-04-27 15:09:24 +00:00
my @ otherusers = qw/nobody nfsnobody dcui daemon/ ;
if ( $ osver =~ /esxi4/ ) {
push @ otherusers , "vimuser" ;
} elsif ( $ osver =~ /esxi5/ ) {
push @ otherusers , "vpxuser" ;
}
2009-06-22 16:00:28 +00:00
print $ shadow "root:$password:$dayssince1970:0:99999:7:::\n" ;
foreach ( @ otherusers ) {
print $ shadow "$_:*:$dayssince1970:0:99999:7:::\n" ;
}
close ( $ shadow ) ;
2010-09-16 13:41:16 +00:00
umask ( $ oldmask ) ;
2011-04-22 18:08:16 +00:00
if ( $ osver =~ /esxi4/ and - e "$::XCATROOT/share/xcat/netboot/esxi/38.xcat-enableipv6" ) {
2010-08-19 13:15:57 +00:00
mkpath ( $ tempdir . "/etc/vmware/init/init.d" ) ;
copy ( "$::XCATROOT/share/xcat/netboot/esxi/38.xcat-enableipv6" , $ tempdir . "/etc/vmware/init/init.d/38.xcat-enableipv6" ) ;
2011-10-10 18:15:12 +00:00
} elsif ( $ osver =~ /esxi5/ and - e "$::XCATROOT/share/xcat/netboot/esxi/xcat-ipv6.json" ) {
mkpath ( $ tempdir . "/usr/libexec/jumpstart/plugins/" ) ;
copy ( "$::XCATROOT/share/xcat/netboot/esxi/xcat-ipv6.json" , $ tempdir . "/usr/libexec/jumpstart/plugins/xcat-ipv6.json" ) ;
2010-08-19 13:15:57 +00:00
}
2011-04-22 18:08:16 +00:00
if ( $ osver =~ /esxi4/ and - e "$::XCATROOT/share/xcat/netboot/esxi/47.xcat-networking" ) {
2010-05-15 01:32:13 +00:00
copy ( "$::XCATROOT/share/xcat/netboot/esxi/47.xcat-networking" , $ tempdir . "/etc/vmware/init/init.d/47.xcat-networking" ) ;
2011-10-10 18:15:12 +00:00
} elsif ( $ osver =~ /esxi5/ and - e "$::XCATROOT/share/xcat/netboot/esxi/39.ipv6fixup" ) {
mkpath ( $ tempdir . "/etc/init.d" ) ;
copy ( "$::XCATROOT/share/xcat/netboot/esxi/39.ipv6fixup" , $ tempdir . "/etc/init.d/39.ipv6fixup" ) ;
chmod ( 0755 , "$tempdir/etc/init.d/39.ipv6fixup" ) ;
}
if ( $ osver =~ /esxi5/ and - e "$::XCATROOT/share/xcat/netboot/esxi/48.esxifixup" ) {
mkpath ( $ tempdir . "/etc/init.d" ) ;
copy ( "$::XCATROOT/share/xcat/netboot/esxi/48.esxifixup" , $ tempdir . "/etc/init.d/48.esxifixup" ) ;
chmod ( 0755 , "$tempdir/etc/init.d/48.esxifixup" ) ;
2010-05-15 01:02:30 +00:00
}
2010-08-19 14:40:41 +00:00
if ( - e "$::XCATROOT/share/xcat/netboot/esxi/xcatsplash" ) {
2011-04-22 18:17:04 +00:00
mkpath ( $ tempdir . "/etc/vmware/" ) ;
2010-08-19 14:40:41 +00:00
copy ( "$::XCATROOT/share/xcat/netboot/esxi/xcatsplash" , $ tempdir . "/etc/vmware/welcome" ) ;
}
2010-08-20 15:12:38 +00:00
my $ dossh = 0 ;
2010-08-20 14:48:31 +00:00
if ( - r "/root/.ssh/id_rsa.pub" ) {
2010-08-20 15:12:38 +00:00
$ dossh = 1 ;
2010-08-20 14:48:31 +00:00
my $ umask = umask ( 0077 ) ; #don't remember if dropbear is picky, but just in case
2011-04-22 18:08:16 +00:00
if ( $ osver =~ /esxi4/ ) { #esxi4 used more typical path
mkpath ( $ tempdir . "/.ssh" ) ;
copy ( "/root/.ssh/id_rsa.pub" , $ tempdir . "/.ssh/authorized_keys" ) ;
} elsif ( $ osver =~ /esxi5/ ) { #weird path to keys
mkpath ( $ tempdir . "/etc/ssh/keys-root" ) ;
copy ( "/root/.ssh/id_rsa.pub" , $ tempdir . "/etc/ssh/keys-root/authorized_keys" ) ;
}
2010-08-20 14:54:36 +00:00
umask ( $ umask ) ;
2010-08-20 14:48:31 +00:00
}
2010-08-19 14:40:41 +00:00
my $ tfile ;
mkpath ( $ tempdir . "/var/run/vmware" ) ;
open $ tfile , ">" , $ tempdir . "/var/run/vmware/show-tech-support-login" ;
close ( $ tfile ) ;
2010-05-15 01:02:30 +00:00
#TODO: auto-enable ssh and request boot-time customization rather than on-demand?
2009-06-22 16:00:28 +00:00
require Cwd ;
my $ dir = Cwd:: cwd ( ) ;
chdir ( $ tempdir ) ;
2011-04-22 18:08:16 +00:00
if ( - e "$dest/$modname" ) {
unlink ( "$dest/$modname" ) ;
2009-06-22 16:00:28 +00:00
}
2011-04-22 18:14:41 +00:00
if ( $ dossh and $ osver =~ /esxi4/ ) {
2011-04-22 18:08:16 +00:00
system ( "tar czf $dest/$modname * .ssh" ) ;
2010-08-20 15:12:38 +00:00
} else {
2011-04-22 18:08:16 +00:00
system ( "tar czf $dest/$modname *" ) ;
2010-08-20 15:12:38 +00:00
}
2009-06-22 16:00:28 +00:00
chdir ( $ dir ) ;
rmtree ( $ tempdir ) ;
2010-01-07 20:03:15 +00:00
return 1 ;
2009-06-22 16:00:28 +00:00
}
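# Contents of the generated mod/xcatmod tarball, as assembled above: an
# /etc/shadow seeded with the passwd-table 'vmware' root password, the xCAT
# IPv6/networking init or jumpstart scripts where they ship with xCAT, the
# xcatsplash welcome banner, root's authorized_keys (dropbear-style path for
# esxi4, /etc/ssh/keys-root for esxi5), and the show-tech-support-login marker
# file under /var/run/vmware.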
2011-10-04 20:02:36 +00:00
sub getplatform {
my $ os = shift ;
if ( $ os =~ /esxi/ ) {
return "esxi" ;
}
return $ os ;
}
sub esxi_kickstart_from_template {
my % args = @ _ ;
my $ installdir = "/install" ;
if ( $ ::XCATSITEVALS { installdir } ) { $ installdir = $ ::XCATSITEVALS { installdir } ; }
my $ plat = getplatform ( $ args { os } ) ;
my $ template = xCAT::SvrUtils:: get_tmpl_file_name ( "$installdir/custom/install/$plat" , $ args { profile } , $ args { os } , $ args { arch } , $ args { os } ) ;
unless ( $ template ) {
$ template = xCAT::SvrUtils:: get_tmpl_file_name ( "$::XCATROOT/share/xcat/install/$plat" , $ args { profile } , $ args { os } , $ args { arch } , $ args { os } ) ;
}
my $ tmperr ;
if ( - r "$template" ) {
$ tmperr = xCAT::Template - > subvars ( $ template , "$installdir/autoinst/" . $ args { node } , $ args { node } , undef ) ;
} else {
$ tmperr = "Unable to find template in /install/custom/install/$plat or $::XCATROOT/share/xcat/install/$plat (for $args{profile}/$args{os}/$args{arch} combination)" ;
}
if ( $ tmperr ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , $ tmperr ] , $ output_handler , $ args { node } ) ;
}
}
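# Template resolution above follows the usual xCAT convention: the profile is
# looked up first under $installdir/custom/install/<platform>/ and then under
# $::XCATROOT/share/xcat/install/<platform>/, and the rendered kickstart is
# written to $installdir/autoinst/<node>, which is what the ks= URL built in
# mkcommonboot() points at.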
sub mkinstall {
return mkcommonboot ( "install" , @ _ ) ;
}
2009-06-22 16:00:28 +00:00
sub mknetboot {
2011-10-04 20:02:36 +00:00
return mkcommonboot ( "stateless" , @ _ ) ;
}
2012-04-17 14:09:49 +00:00
sub merge_esxi5_append {
my $ tmpl = shift ;
my $ append = shift ;
my $ outfile = shift ;
my $ in ;
my $ out ;
open ( $ in , "<" , $ tmpl ) ;
open ( $ out , ">" , $ outfile ) ;
my $ line ;
while ( $ line = <$in> ) {
2012-05-14 21:02:35 +00:00
if ( $ line =~ /kernelopt=/ ) {
chomp ( $ line ) ;
$ line . = $ append . "\n" ;
#if ($line =~ /modules=b.b00/) {
# $line =~ s/modules=b.b00/modules=b.b00 $append/;
2012-04-17 14:09:49 +00:00
}
2012-05-18 18:13:37 +00:00
unless ( $ line =~ /^prefix=/ ) {
print $ out $ line ;
}
2012-04-17 14:09:49 +00:00
}
}
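# Sketch of what merge_esxi5_append() does with a template (the sample line is
# hypothetical): a template line such as
#   kernelopt=cdromBoot
# plus an append string of " ks=http://<mgmt node>/install/autoinst/<node>" comes
# out as one longer kernelopt= line in boot.cfg.<bootmode>.<node>; any prefix=
# line in the template is dropped.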
2011-10-04 20:02:36 +00:00
sub mkcommonboot {
my $ bootmode = shift ;
2009-06-22 16:00:28 +00:00
my $ req = shift ;
my $ doreq = shift ;
2012-02-15 21:04:42 +00:00
my $ globaltftpdir = "/tftpboot" ;
2009-06-22 16:00:28 +00:00
my @ nodes = @ { $ req - > { node } } ;
my $ ostab = xCAT::Table - > new ( 'nodetype' ) ;
2012-05-22 09:00:37 +00:00
#my $sitetab = xCAT::Table->new('site');
2009-06-22 16:00:28 +00:00
my $ bptab = xCAT::Table - > new ( 'bootparams' , - create = > 1 ) ;
my $ installroot = "/install" ;
2012-05-22 09:00:37 +00:00
#if ($sitetab){
#(my $ref) = $sitetab->getAttribs({key => 'installdir'}, 'value');
my @ entries = xCAT::Utils - > get_site_attribute ( "installdir" ) ;
my $ t_entry = $ entries [ 0 ] ;
if ( defined ( $ t_entry ) ) {
$ installroot = $ t_entry ;
2009-06-22 16:00:28 +00:00
}
2012-05-22 09:00:37 +00:00
#($ref) = $sitetab->getAttribs({key => 'tftpdir'}, 'value');
@ entries = xCAT::Utils - > get_site_attribute ( "tftpdir" ) ;
$ t_entry = $ entries [ 0 ] ;
if ( defined ( $ t_entry ) ) {
$ globaltftpdir = $ t_entry ;
2009-06-22 16:00:28 +00:00
}
2012-05-22 09:00:37 +00:00
#}
2009-06-22 16:00:28 +00:00
my % donetftp = ( ) ;
2009-08-07 17:06:22 +00:00
my $ bpadds = $ bptab - > getNodesAttribs ( \ @ nodes , [ 'addkcmdline' ] ) ;
2011-08-22 19:28:37 +00:00
my $ nodehmtab = xCAT::Table - > new ( 'nodehm' , - create = > 0 ) ;
my $ serialconfig ;
if ( $ nodehmtab ) {
$ serialconfig = $ nodehmtab - > getNodesAttribs ( \ @ nodes , [ 'serialport' , 'serialspeed' ] ) ;
}
2012-02-15 21:04:42 +00:00
my $ restab = xCAT::Table - > new ( 'noderes' , - create = > 0 ) ;
my $ resents ;
if ( $ restab ) {
2012-05-04 20:51:12 +00:00
$ resents = $ restab - > getNodesAttribs ( \ @ nodes , [ 'tftpdir' , 'nfsserver' ] ) ;
2012-02-15 21:04:42 +00:00
}
2011-08-22 19:28:37 +00:00
2009-08-19 14:28:17 +00:00
my % tablecolumnsneededforaddkcmdline ;
my % nodesubdata ;
foreach my $ key ( keys %$ bpadds ) { #First, we identify all needed table.columns needed to aggregate database call
my $ add = $ bpadds - > { $ key } - > [ 0 ] - > { addkcmdline } ;
2009-11-19 22:46:26 +00:00
next if ! defined $ add ;
2009-08-19 14:28:17 +00:00
while ( $ add =~ /#NODEATTRIB:([^:#]+):([^:#]+)#/ ) {
push @ { $ tablecolumnsneededforaddkcmdline { $ 1 } } , $ 2 ;
$ add =~ s/#NODEATTRIB:([^:#]+):([^:#]+)#// ;
}
}
foreach my $ table ( keys % tablecolumnsneededforaddkcmdline ) {
my $ tab = xCAT::Table - > new ( $ table , - create = > 0 ) ;
if ( $ tab ) {
$ nodesubdata { $ table } = $ tab - > getNodesAttribs ( \ @ nodes , $ tablecolumnsneededforaddkcmdline { $ table } ) ;
}
}
2012-02-15 21:04:42 +00:00
my $ osents = $ ostab - > getNodesAttribs ( \ @ nodes , [ 'os' , 'arch' , 'profile' ] ) ;
2009-06-22 16:00:28 +00:00
foreach my $ node ( @ nodes ) {
2012-02-15 21:04:42 +00:00
my $ ent = $ osents - > { $ node } - > [ 0 ] ;
2009-06-22 16:00:28 +00:00
my $ arch = $ ent - > { 'arch' } ;
my $ profile = $ ent - > { 'profile' } ;
my $ osver = $ ent - > { 'os' } ;
2012-02-15 21:04:42 +00:00
my $ tftpdir ;
2012-05-04 20:51:12 +00:00
my $ ksserver ;
if ( $ resents and $ resents - > { $ node } - > [ 0 ] - > { nfsserver } ) {
$ ksserver = $ resents - > { $ node } - > [ 0 ] - > { nfsserver } ;
} else {
$ ksserver = '!myipfn!' ;
}
2012-02-15 21:04:42 +00:00
if ( $ resents and $ resents - > { $ node } - > [ 0 ] - > { tftpdir } ) {
$ tftpdir = $ resents - > { $ node } - > [ 0 ] - > { tftpdir } ;
} else {
$ tftpdir = $ globaltftpdir ;
}
2009-09-28 21:26:01 +00:00
#if($arch ne 'x86'){
2010-08-06 15:29:07 +00:00
# xCAT::SvrUtils::sendmsg([1,"VMware ESX hypervisors are x86, please change the nodetype.arch value to x86 instead of $arch for $node before proceeding:
2009-09-28 21:26:01 +00:00
#e.g: nodech $node nodetype.arch=x86\n"]);
# return;
#}
2009-06-22 16:00:28 +00:00
# first make sure copycds was done:
2009-09-28 21:26:01 +00:00
my $ custprofpath = $ profile ;
unless ( $ custprofpath =~ /^\// ) { #If profile begins with a /, assume it already is a path
2010-11-05 14:26:14 +00:00
$ custprofpath = $ installroot . "/custom/install/$osver/$arch/$profile" ;
unless ( - d $ custprofpath ) {
$ custprofpath = $ installroot . "/custom/install/esxi/$arch/$profile" ;
}
2009-09-28 21:26:01 +00:00
}
2009-06-22 16:00:28 +00:00
unless (
2009-09-28 21:26:01 +00:00
- r "$custprofpath/vmkboot.gz"
2011-08-17 18:41:54 +00:00
or - r "$custprofpath/b.z"
2012-05-03 14:23:16 +00:00
or - r "$custprofpath/mboot.c32"
or - r "$custprofpath/install.tgz"
2009-09-28 21:26:01 +00:00
or - r "$installroot/$osver/$arch/mboot.c32"
2009-06-22 16:00:28 +00:00
or - r "$installroot/$osver/$arch/install.tgz" ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Please run copycds first for $osver or create custom image in $custprofpath/" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
}
2010-06-15 20:45:32 +00:00
my @ reqmods = qw/vmkboot.gz vmk.gz sys.vgz cim.vgz/ ; #Required modules for an image to be considered complete
2011-08-17 18:41:54 +00:00
if ( - r "$custprofpath/b.z" ) { #if someone hand extracts from imagedd, a different name scheme is used
@ reqmods = qw/b.z k.z s.z c.z/ ;
}
2009-09-28 21:26:01 +00:00
my % mods ;
foreach ( @ reqmods ) {
$ mods { $ _ } = 1 ;
}
2009-10-02 15:27:59 +00:00
my $ shortprofname = $ profile ;
$ shortprofname =~ s/\/\z// ;
$ shortprofname =~ s/.*\/// ;
2011-03-01 15:36:09 +00:00
mkpath ( "$tftpdir/xcat/netboot/$osver/$arch/$shortprofname/" ) ;
2012-06-22 14:52:17 +00:00
my $ havemod = 0 ;
2012-02-15 21:04:42 +00:00
unless ( $ donetftp { $ osver , $ arch , $ profile , $ tftpdir } ) {
2009-06-22 16:00:28 +00:00
my $ srcdir = "$installroot/$osver/$arch" ;
2009-10-02 15:20:52 +00:00
my $ dest = "$tftpdir/xcat/netboot/$osver/$arch/$shortprofname" ;
2011-10-04 20:02:36 +00:00
cpNetbootImages ( $ osver , $ srcdir , $ dest , $ custprofpath , \ % mods , bootmode = > $ bootmode ) ;
2012-06-22 14:52:17 +00:00
if ( $ havemod = makecustomizedmod ( $ osver , $ dest ) ) {
2010-01-07 20:03:15 +00:00
push @ reqmods , "mod.tgz" ;
$ mods { "mod.tgz" } = 1 ;
}
2011-04-22 18:36:47 +00:00
if ( $ osver =~ /esxi4/ and - r "$::XCATROOT/share/xcat/netboot/syslinux/mboot.c32" ) { #prefer xCAT patched mboot.c32 with BOOTIF for mboot
2010-05-15 01:44:30 +00:00
copy ( "$::XCATROOT/share/xcat/netboot/syslinux/mboot.c32" , $ dest ) ;
2012-05-17 17:19:37 +00:00
} elsif ( - r "$custprofpath/mboot.c32" ) {
copy ( "$custprofpath/mboot.c32" , $ dest ) ;
} elsif ( - r "$srcdir/mboot.c32" ) {
2010-05-15 01:24:56 +00:00
copy ( "$srcdir/mboot.c32" , $ dest ) ;
2012-05-17 17:19:37 +00:00
}
2011-11-02 20:28:51 +00:00
if ( - f "$srcdir/efiboot.img" ) {
copy ( "$srcdir/efiboot.img" , $ dest ) ;
print ( "$srcdir/efi" ) ;
mkpath ( "$dest/efi" ) ;
recursion_copy ( "$srcdir/efi" , "$dest/efi" ) ;
2010-05-15 01:24:56 +00:00
}
2012-02-15 21:04:42 +00:00
$ donetftp { $ osver , $ arch , $ profile , $ tftpdir } = 1 ;
2009-06-22 16:00:28 +00:00
}
2009-10-02 15:20:52 +00:00
my $ tp = "xcat/netboot/$osver/$arch/$shortprofname" ;
2011-04-22 19:09:30 +00:00
my $ kernel ;
my $ kcmdline ;
2011-04-22 19:11:23 +00:00
my $ append ;
2012-04-17 14:09:49 +00:00
my $ shortappend ;
2011-04-22 18:36:47 +00:00
if ( $ osver =~ /esxi4/ ) {
my $ bail = 0 ;
foreach ( @ reqmods ) {
unless ( - r "$tftpdir/$tp/$_" ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "$_ is missing from the target destination, ensure that either copycds has been run or that $custprofpath contains this file" ] , $ output_handler ) ;
$ bail = 1 ; #only flag to bail, present as many messages as possible to user
}
}
if ( $ bail ) { #if the above loop detected one or more failures, bail out
return ;
}
# now build the kernel and kcmdline entries that will end up in this node's boot (pxelinux <HEX>) configuration
2011-04-22 19:09:30 +00:00
$ kernel = "$tp/mboot.c32" ;
2011-08-17 18:41:54 +00:00
my $ prepend ;
if ( $ reqmods [ 0 ] eq "vmkboot.gz" ) {
$ prepend = "$tp/vmkboot.gz" ;
delete $ mods { "vmkboot.gz" } ;
$ append = " --- $tp/vmk.gz" ;
delete $ mods { "vmk.gz" } ;
$ append . = " --- $tp/sys.vgz" ;
delete $ mods { "sys.vgz" } ;
$ append . = " --- $tp/cim.vgz" ;
delete $ mods { "cim.vgz" } ;
} else { #the single letter style
$ prepend = "$tp/b.z" ;
delete $ mods { "b.z" } ;
$ append = " --- $tp/k.z" ;
delete $ mods { "k.z" } ;
$ append . = " --- $tp/s.z" ;
delete $ mods { "s.z" } ;
$ append . = " --- $tp/c.z" ;
delete $ mods { "c.z" } ;
}
2010-01-07 20:03:15 +00:00
if ( $ mods { "mod.tgz" } ) {
$ append . = " --- $tp/mod.tgz" ;
delete $ mods { "mod.tgz" } ;
}
2009-09-28 21:26:01 +00:00
foreach ( keys % mods ) {
$ append . = " --- $tp/$_" ;
}
2009-08-07 17:06:22 +00:00
if ( defined $ bpadds - > { $ node } - > [ 0 ] - > { addkcmdline } ) {
2009-08-18 18:59:20 +00:00
my $ modules ;
( $ kcmdline , $ modules ) = split /---/ , $ bpadds - > { $ node } - > [ 0 ] - > { addkcmdline } , 2 ;
2009-08-19 14:28:17 +00:00
$ kcmdline =~ s/#NODEATTRIB:([^:#]+):([^:#]+)#/$nodesubdata{$1}->{$node}->[0]->{$2}/eg ;
2009-08-18 18:59:20 +00:00
if ( $ modules ) {
$ append . = " --- " . $ modules ;
}
$ prepend . = " " . $ kcmdline ;
2009-08-07 17:06:22 +00:00
}
2009-08-18 18:59:20 +00:00
$ append = $ prepend . $ append ;
2011-04-22 18:36:47 +00:00
}
elsif ( $ osver =~ /esxi5/ ) { #do a more straightforward thing..
2011-04-22 19:09:30 +00:00
$ kernel = "$tp/mboot.c32" ;
2012-04-26 19:50:05 +00:00
if ( - r "$tftpdir/$tp/boot.cfg.$bootmode.tmpl" ) { #so much for straightforward..
$ shortappend = "-c $tp/boot.cfg.$bootmode.$node" ;
2012-04-17 14:09:49 +00:00
} else {
2011-10-04 20:02:36 +00:00
$ append = "-c $tp/boot.cfg.$bootmode" ;
2012-04-17 14:09:49 +00:00
}
2011-10-04 20:02:36 +00:00
if ( $ bootmode eq "install" ) {
2012-05-04 20:51:12 +00:00
$ append . = " ks=http://$ksserver/install/autoinst/$node" ;
2011-10-04 20:02:36 +00:00
esxi_kickstart_from_template ( node = > $ node , os = > $ osver , arch = > $ arch , profile = > $ profile ) ;
}
2011-10-10 18:21:32 +00:00
if ( $ bootmode ne "install" and $ serialconfig - > { $ node } ) { #don't do it for install, installer croaks currently
2011-08-22 19:28:37 +00:00
my $ comport = 1 ;
if ( defined $ serialconfig - > { $ node } - > [ 0 ] - > { serialport } ) {
$ comport = $ serialconfig - > { $ node } - > [ 0 ] - > { serialport } + 1 ;
$ append . = " -S $comport tty2port=com$comport" ;
}
if ( defined $ serialconfig - > { $ node } - > [ 0 ] - > { serialspeed } ) {
$ append . = " -s " . $ serialconfig - > { $ node } - > [ 0 ] - > { serialspeed } . " com" . $ comport . "_baud=" . $ serialconfig - > { $ node } - > [ 0 ] - > { serialspeed } ;
}
}
2012-05-04 21:02:45 +00:00
if ( defined $ bpadds - > { $ node } - > [ 0 ] - > { addkcmdline } ) {
$ append . = " " . $ bpadds - > { $ node } - > [ 0 ] - > { addkcmdline } ;
$ append =~ s/#NODEATTRIB:([^:#]+):([^:#]+)#/$nodesubdata{$1}->{$node}->[0]->{$2}/eg ;
}
2011-04-22 18:36:47 +00:00
}
2012-04-17 14:09:49 +00:00
if ( $ shortappend ) { #esxi5 user desiring to put everything in one boot config file. . .
2012-04-26 19:50:05 +00:00
merge_esxi5_append ( "$tftpdir/$tp/boot.cfg.$bootmode.tmpl" , $ append , "$tftpdir/$tp/boot.cfg.$bootmode.$node" ) ;
2012-04-17 14:09:49 +00:00
$ append = $ shortappend ;
}
2011-04-22 18:36:47 +00:00
$ output_handler - > ( { node = > [ { name = > [ $ node ] , '_addkcmdlinehandled' = > [ 1 ] } ] } ) ;
2009-08-18 18:59:20 +00:00
2009-08-07 17:06:22 +00:00
2009-06-22 16:00:28 +00:00
$ bptab - > setNodeAttribs (
$ node ,
{
kernel = > $ kernel ,
initrd = > "" ,
kcmdline = > $ append
}
) ;
} # end of node loop
}
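# Net effect per node (paths below are representative): bootparams ends up with
#   kernel   = xcat/netboot/<os>/<arch>/<profile>/mboot.c32
#   initrd   = ""
#   kcmdline = "-c xcat/netboot/<os>/<arch>/<profile>/boot.cfg.<bootmode>[.<node>] ..."   (esxi5)
# or, for esxi4, a "vmkboot.gz ... --- vmk.gz --- sys.vgz --- cim.vgz --- mod.tgz ..."
# module chain; serial settings (esxi5, non-install) and bootparams.addkcmdline
# text are folded into kcmdline where defined.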
# this is where we extract the netboot images out of the copied ISO image
sub cpNetbootImages {
my $ osver = shift ;
my $ srcDir = shift ;
my $ destDir = shift ;
2009-09-28 21:26:01 +00:00
my $ overridedir = shift ;
my $ modulestoadd = shift ;
2011-10-04 20:02:36 +00:00
my % parmargs = @ _ ;
my $ bootmode = "stateless" ;
if ( $ parmargs { bootmode } ) { $ bootmode = $ parmargs { bootmode } }
2009-06-22 16:00:28 +00:00
my $ tmpDir = "/tmp/xcat.$$" ;
if ( $ osver =~ /esxi4/ ) {
# we don't want to go through this all the time, so if it's already
# there we're not going to extract:
2009-09-28 21:26:01 +00:00
unless ( - r "$destDir/vmk.gz"
2009-06-22 16:00:28 +00:00
and - r "$destDir/vmkboot.gz"
and - r "$destDir/sys.vgz"
2010-05-17 14:58:30 +00:00
and - r "$destDir/cim.vgz"
and - r "$destDir/cimstg.tgz"
2009-06-22 16:00:28 +00:00
) {
2009-09-28 21:26:01 +00:00
if ( - r "$srcDir/image.tgz" ) { #it still may work without image.tgz if profile customization has everything replaced
mkdir ( $ tmpDir ) ;
chdir ( $ tmpDir ) ;
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( "extracting netboot files from OS image. This may take about a minute or two...hopefully you have ~1GB free in your /tmp dir\n" , $ output_handler ) ;
2010-11-15 19:16:33 +00:00
my $ cmd = "tar zxf $srcDir/image.tgz" ;
2009-09-28 21:26:01 +00:00
print "\n$cmd\n" ;
2010-11-15 19:16:33 +00:00
if ( system ( $ cmd ) ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to extract $srcDir/image.tgz\n" ] , $ output_handler ) ;
2009-09-28 21:26:01 +00:00
}
# this has the big image and may take a while.
# this should now create:
# /tmp/xcat.1234/usr/lib/vmware/installer/VMware-VMvisor-big-164009-x86_64.dd.bz2 or some other version. We need to extract partition 5 from it.
system ( "bunzip2 $tmpDir/usr/lib/vmware/installer/*bz2" ) ;
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( "finished extracting, now copying files...\n" , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
2009-09-28 21:26:01 +00:00
# now we need to get partition 5 which has the installation goods in it.
my $ scmd = "fdisk -lu $tmpDir/usr/lib/vmware/installer/*dd 2>&1 | grep dd5 | awk '{print \$2}'" ;
print "running: $scmd\n" ;
my $ sector = `$scmd` ;
chomp ( $ sector ) ;
my $ offset = $ sector * 512 ;
mkdir "/mnt/xcat" ;
my $ mntcmd = "mount $tmpDir/usr/lib/vmware/installer/*dd /mnt/xcat -o loop,offset=$offset" ;
print "$mntcmd\n" ;
if ( system ( $ mntcmd ) ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "unable to mount partition 5 of the ESX netboot image to /mnt/xcat" ] , $ output_handler ) ;
2009-09-28 21:26:01 +00:00
return ;
}
2009-11-19 22:46:26 +00:00
if ( ! - d $ destDir ) {
2011-02-16 20:56:38 +00:00
if ( - e $ destDir ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Could not copy netboot contents to $destDir, it exists but is not currently a directory" ] , $ output_handler ) ;
return ;
}
2009-11-19 22:46:26 +00:00
mkpath ( $ destDir ) ;
}
2009-09-28 21:26:01 +00:00
if ( system ( "cp /mnt/xcat/* $destDir/" ) ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Could not copy netboot contents to $destDir" ] , $ output_handler ) ;
2009-09-28 21:26:01 +00:00
system ( "umount /mnt/xcat" ) ;
return ;
}
chdir ( "/tmp" ) ;
system ( "umount /mnt/xcat" ) ;
print "tempDir: $tmpDir\n" ;
system ( "rm -rf $tmpDir" ) ;
2010-06-15 20:45:32 +00:00
} elsif ( - r "$srcDir/cim.vgz" and - r "$srcDir/vmkernel.gz" and - r "$srcDir/vmkboot.gz" and - r "$srcDir/sys.vgz" ) {
use File::Basename ;
if ( ! - d $ destDir ) {
mkpath ( $ destDir ) ;
}
#In ESXI 4.1, the above breaks, this seems to work, much simpler too
foreach ( "$srcDir/cim.vgz" , "$srcDir/vmkernel.gz" , "$srcDir/vmkboot.gz" , "$srcDir/sys.vgz" , "$srcDir/sys.vgz" ) {
my $ mod = scalar fileparse ( $ _ ) ;
if ( $ mod =~ /vmkernel.gz/ ) {
2010-08-06 15:29:07 +00:00
copy ( $ _ , "$destDir/vmk.gz" ) or xCAT::SvrUtils:: sendmsg ( [ 1 , "Could not copy netboot contents from $_ to $destDir/$mod" ] , $ output_handler ) ;
2010-06-15 20:45:32 +00:00
} else {
2010-08-06 15:29:07 +00:00
copy ( $ _ , "$destDir/$mod" ) or xCAT::SvrUtils:: sendmsg ( [ 1 , "Could not copy netboot contents from $_ to $destDir/$mod" ] , $ output_handler ) ;
2010-06-15 20:45:32 +00:00
}
}
2009-09-28 21:26:01 +00:00
}
}
2010-11-10 16:21:58 +00:00
#this is the override directory if there is one, otherwise it's actually the default dir
unless ( - d $ overridedir ) {
mkdir ( $ overridedir ) ;
2009-09-28 21:26:01 +00:00
}
2010-11-10 16:21:58 +00:00
#Copy over all modules
use File::Basename ;
foreach ( glob "$overridedir/*" ) {
my $ mod = scalar fileparse ( $ _ ) ;
if ( $ mod =~ /gz\z/ and $ mod !~ /pkgdb.tgz/ and $ mod !~ /vmkernel.gz/ ) {
$ modulestoadd - > { $ mod } = 1 ;
copy ( $ _ , "$destDir/$mod" ) or xCAT::SvrUtils:: sendmsg ( [ 1 , "Could not copy netboot contents from $overridedir to $destDir" ] , $ output_handler ) ;
} elsif ( $ mod =~ /vmkernel.gz/ ) {
$ modulestoadd - > { "vmk.gz" } = 1 ;
copy ( $ _ , "$destDir/vmk.gz" ) or xCAT::SvrUtils:: sendmsg ( [ 1 , "Could not copy netboot contents from $overridedir to $destDir" ] , $ output_handler ) ;
}
}
2009-09-28 21:26:01 +00:00
2011-04-22 15:03:25 +00:00
} elsif ( $ osver =~ /esxi5/ ) { #we need boot.cfg.$bootmode (stateless or install)
2012-05-04 20:45:44 +00:00
my @ filestocopy = ( "boot.cfg.$bootmode" ) ;
if ( - r "$srcDir/boot.cfg.$bootmode" or - r "$overridedir/boot.cfg.$bootmode" ) {
@ filestocopy = ( "boot.cfg.$bootmode" ) ;
} elsif ( - r "$srcDir/boot.cfg.$bootmode.tmpl" or - r "$overridedir/boot.cfg.$bootmode.tmpl" ) {
@ filestocopy = ( "boot.cfg.$bootmode.tmpl" ) ;
} else {
2011-10-04 20:02:36 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "$srcDir is missing boot.cfg.$bootmode file required for $bootmode boot" ] , $ output_handler ) ;
2011-04-22 15:03:25 +00:00
return ;
}
my $statelesscfg;
if (-r "$overridedir/boot.cfg.$bootmode.tmpl") {
open($statelesscfg, "<", "$overridedir/boot.cfg.$bootmode.tmpl");
@filestocopy = ("boot.cfg.$bootmode.tmpl");
} elsif (-r "$overridedir/boot.cfg.$bootmode") {
open($statelesscfg, "<", "$overridedir/boot.cfg.$bootmode");
} elsif (-r "$srcDir/boot.cfg.$bootmode.tmpl") {
@filestocopy = ("boot.cfg.$bootmode.tmpl");
open($statelesscfg, "<", "$srcDir/boot.cfg.$bootmode.tmpl");
} elsif (-r "$srcDir/boot.cfg.$bootmode") {
open($statelesscfg, "<", "$srcDir/boot.cfg.$bootmode");
} else {
die "boot.cfg.$bootmode was missing from $srcDir???";
}
my @statelesscfg = <$statelesscfg>;
foreach (@statelesscfg) { #search for files specified by the boot cfg and pull them in
if (/^kernel=(.*)/) {
push @filestocopy, $1;
} elsif (/^modules=(.*)/) {
foreach (split / --- /, $1) {
s/^\s*//;
s/\s.*//;
push @filestocopy, $_;
}
}
}
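#Illustrative only (hypothetical boot.cfg contents): given lines such as
#  kernel=tboot.b00
#  modules=b.b00 --- useropts.gz --- k.b00 --- chardevs.b00
#the loop above would add "tboot.b00", "b.b00", "useropts.gz", "k.b00" and
#"chardevs.b00" to @filestocopy, stripping leading whitespace and anything
#after the first space in each module entry.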
#now that we have a list, do the copy (mostly redundant, but PXE needs them tftp accessible)
foreach (@filestocopy) {
chomp;
s/ *\z//;
my $mod = scalar fileparse($_);
if (-r "$overridedir/$mod") {
copyIfNewer("$overridedir/$mod", "$destDir/$mod") or xCAT::SvrUtils::sendmsg([ 1, "Could not copy netboot contents from $overridedir/$mod to $destDir/$mod, $!" ], $output_handler);
} elsif (-r "$srcDir/$mod") {
copyIfNewer("$srcDir/$mod", "$destDir/$mod") or xCAT::SvrUtils::sendmsg([ 1, "Could not copy netboot contents from $srcDir/$mod to $destDir/$mod, $!" ], $output_handler);
} elsif ($mod ne "xcatmod.tgz") {
xCAT::SvrUtils::sendmsg([ 1, "Could not copy netboot contents from $srcDir/$mod to $destDir/$mod, $srcDir/$mod not found" ], $output_handler);
}
}
} else {
xCAT::SvrUtils::sendmsg([ 1, "VMware $osver is not supported for netboot" ], $output_handler);
}
}

sub copyIfNewer {
my $source = shift;
my $dest = shift;
#-C gives the age of the file's inode change time (in days, relative to script start);
#a smaller value means a more recent change, so copy only when the destination is
#missing or the source changed more recently than the destination
if (! -e $dest or -C $source < -C $dest) {
return copy($source, $dest);
}
return 1;
}
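#Minimal usage sketch (hypothetical paths, shown for illustration): copyIfNewer()
#returns true on a successful copy or when the copy was safely skipped, so callers
#above treat a false return as a copy failure:
#  copyIfNewer("/install/esxi5/mod.gz", "/tftpboot/xcat/netboot/esxi5/mod.gz")
#      or xCAT::SvrUtils::sendmsg([ 1, "copy failed: $!" ], $output_handler);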

# compares nfs target described by parameters to every share mounted by target hypervisor
# returns 1 if matching datastore is present and 0 otherwise
sub match_nfs_datastore {
my ($host, $path, $hypconn) = @_;
die "esx plugin bug: no host provided for match_nfs_datastore" unless defined $host;
die "esx plugin bug: no path provided for match_nfs_datastore" unless defined $path;
my @ip;
eval {
use Socket;
if ($host =~ m/\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b/) {
#host is already a dotted-quad address; remember it and try to resolve a name for it
@ip = ($host);
$host = gethostbyaddr(inet_aton($host), AF_INET);
} else {
#host is a name; collect every address it resolves to
(undef, undef, undef, undef, @ip) = gethostbyname($host);
my @ip_ntoa = ();
foreach (@ip) {
push(@ip_ntoa, inet_ntoa($_));
}
@ip = @ip_ntoa;
}
};
if ($@) {
die "error while resolving datastore host: $@\n";
}
my %viewcrit = (
view_type => 'HostSystem',
properties => [ 'config.fileSystemVolume' ],
);
my $dsviews = $hypconn->find_entity_views(%viewcrit);
foreach (@$dsviews) {
foreach my $mount (@{ $_->get_property('config.fileSystemVolume.mountInfo') }) {
next unless $mount->{'volume'}{'type'} eq 'NFS';
my $hostMatch = 0;
HOSTMATCH: foreach (@ip, $host) {
next HOSTMATCH unless $mount->{'volume'}{'remoteHost'} eq $_;
$hostMatch = 1;
last HOSTMATCH;
}
next unless $hostMatch;
next unless $mount->{'volume'}{'remotePath'} eq $path;
return 1;
}
}
return 0;
}
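
#Illustrative call (hypothetical server address, export path and hash key; the
#'conn' key name is assumed here, not dictated by this sub). $hypconn must be a
#live VMware::VIRuntime connection object for the hypervisor being checked:
#  if (match_nfs_datastore("172.16.0.1", "/export/vms", $hyphash{$hyp}->{conn})) {
#      #the NFS export is already mounted as a datastore, so skip the mount step
#  }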
1 ;
# vi: set ts=4 sw=4 filetype=perl: