package xCAT_plugin::esx;
use strict;
use warnings;
use xCAT::Table;
use xCAT::Utils;
use Time::HiRes qw(sleep);
use xCAT::MsgUtils;
use xCAT::SvrUtils;
use xCAT::NodeRange;
use xCAT::Common;
use xCAT::VMCommon;
use POSIX "WNOHANG";
use Getopt::Long;
use Thread qw(yield);
use POSIX qw(WNOHANG nice);
use File::Path qw/mkpath rmtree/;
use File::Temp qw/tempdir/;
use File::Copy;
use IO::Socket;    #Need name resolution
#use Data::Dumper;

Getopt::Long::Configure("bundling");
Getopt::Long::Configure("pass_through");

my @cpiopid;
our @ISA = 'xCAT::Common';
#in xCAT, the lifetime of a process ends on every request
#therefore, the lifetime of assignments to these globals as architected
#is to be cleared on every request
#my %esx_comm_pids;
my %hyphash;       #A data structure to hold hypervisor-wide variables (i.e. the current resource pool, virtual machine folder, connection object)
my %vcenterhash;   #A data structure to reflect the state of vcenter connectivity to hypervisors
my %hypready;      #A structure for hypervisor readiness to be tracked before proceeding to normal operations
my %running_tasks; #A struct to track this process's pending VMware tasks
my $output_handler;    #Pointer to the function to drive results to client
my $executerequest;
my $usehostnamesforvcenter;
my %tablecfg;    #to hold the tables
my $currkey;
my $requester;
my $viavcenter;
my $viavcenterbyhyp;
my $vmwaresdkdetect = eval {
    require VMware::VIRuntime;
    VMware::VIRuntime->import();
    1;
};

#map OS identifiers from the nodetype table to VMware guestId values/prefixes
my %guestidmap = (
    "rhel.6.*"   => "rhel6_",
    "rhel.5.*"   => "rhel5_",
    "rhel4.*"    => "rhel4_",
    "centos5.*"  => "rhel5_",
    "centos4.*"  => "rhel4_",
    "sles11.*"   => "sles11_",
    "sles10.*"   => "sles10_",
    "win2k8"     => "winLonghorn",
    "win2k8r2"   => "windows7Server",
    "win7"       => "windows7_",
    "win2k3"     => "winNetStandard",
    "imagex"     => "winNetStandard",
    "boottarget" => "otherLinux"
    #otherGuest, otherGuest64, otherLinuxGuest, otherLinux64Guest
);
sub handled_commands {
    return {
        copycd    => 'esx',
        mknetboot => "nodetype:os=(esxi.*)",
        rpower    => 'nodehm:power,mgt',
        rsetboot  => 'nodehm:power,mgt',
        rmigrate  => 'nodehm:power,mgt',
        mkvm      => 'nodehm:mgt',
        rmvm      => 'nodehm:mgt',
        clonevm   => 'nodehm:mgt',
        rinv      => 'nodehm:mgt',
        chvm      => 'nodehm:mgt',
        lsvm         => [ 'hypervisor:type', 'nodetype:os=(esx.*)' ],
        rmhypervisor => [ 'hypervisor:type', 'nodetype:os=(esx.*)' ],
        #lsvm => 'nodehm:mgt', not really supported yet
    };
}
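
#Fan the request out to the appropriate service nodes.  Credentials from the
#passwd table ('vmware' and 'vcenter' keys) and the managing vCenter
#(hypervisor.mgr) are packed per hypervisor into 'moreinfo' strings of the form
#[hyp][node1,node2,...][username][password][vusername][vpassword][mgr]
#which process_request parses on the service node side.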
sub preprocess_request {
    my $request  = shift;
    my $callback = shift;
    my $username = 'root';
    my $password = '';
    my $vusername = "Administrator";
    my $vpassword = "";

    unless ($request and $request->{command} and $request->{command}->[0]) { return; }
    if ($request->{command}->[0] eq 'copycd')
    {    #don't farm out copycd
        return [$request];
    } elsif ($request->{command}->[0] eq 'mknetboot') {
        return [$request];
    }
    xCAT::Common::usage_noderange($request, $callback);

    if ($request->{_xcatpreprocessed} and $request->{_xcatpreprocessed}->[0] == 1) { return [$request]; }
    # exit if already preprocessed

    my @requests;
    my $noderange = $request->{node};    # array ref
    my $command   = $request->{command}->[0];
    my $extraargs = $request->{arg};
    my @exargs    = ($request->{arg});
    my %hyp_hash  = ();

    # Get nodes from mp table and assign nodes to mp hash.
    my $passtab = xCAT::Table->new('passwd');
    my $tmp;
    if ($passtab) {
        ($tmp) = $passtab->getAttribs({ 'key' => 'vmware' }, 'username', 'password');
        if (defined($tmp)) {
            $username = $tmp->{username};
            $password = $tmp->{password};
        }
        ($tmp) = $passtab->getAttribs({ 'key' => 'vcenter' }, 'username', 'password');
        if (defined($tmp)) {
            $vusername = $tmp->{username};
            $vpassword = $tmp->{password};
        }
    }
    my $vmtab = xCAT::Table->new("vm");
    unless ($vmtab) {
        $callback->({ data => ["Cannot open vm table"] });
        $request = {};
        return;
    }
    my $vmtabhash = $vmtab->getNodesAttribs($noderange, ['host']);
    foreach my $node (@$noderange) {
        if ($command eq "rmhypervisor" or $command eq 'lsvm') {
            $hyp_hash{$node}{nodes} = [$node];
        } else {
            my $ent = $vmtabhash->{$node}->[0];
            if (defined($ent->{host})) {
                push @{ $hyp_hash{ $ent->{host} }{nodes} }, $node;
            } else {
                $callback->({ data => ["no host defined for guest $node"] });
                $request = {};
                return;
            }
            if (defined($ent->{id})) {
                push @{ $hyp_hash{ $ent->{host} }{ids} }, $ent->{id};
            } else {
                push @{ $hyp_hash{ $ent->{host} }{ids} }, "";
            }
        }
    }

    # find service nodes for the MMs
    # build an individual request for each service node
    my $service = "xcat";
    my @hyps = keys(%hyp_hash);
    if ($command eq 'rmigrate' and (scalar @{$extraargs} >= 1)) {
        @ARGV = @{$extraargs};
        my $offline;
        my $junk;
        GetOptions(
            "f"   => \$offline,
            "s=s" => \$junk    #we don't care about it, but suck up nfs:// targets so they don't get added
        );
        my $dsthyp = $ARGV[0];
        if ($dsthyp) {
            push @hyps, $dsthyp;
        }
    }

    #TODO: per hypervisor table password lookup
    my $sn = xCAT::Utils->get_ServiceNode(\@hyps, $service, "MN");

    #vmtabhash was from when we had vm.host do double duty for hypervisor data
    #$vmtabhash = $vmtab->getNodesAttribs(\@hyps,['host']);
    #We now use hypervisor fields to be unambiguous
    my $hyptab = xCAT::Table->new('hypervisor');
    my $hyptabhash = {};
    if ($hyptab) {
        $hyptabhash = $hyptab->getNodesAttribs(\@hyps, ['mgr']);
    }

    # build each request for each service node
    foreach my $snkey (keys %$sn) {
        my $reqcopy = {%$request};
        $reqcopy->{'_xcatdest'} = $snkey;
        $reqcopy->{_xcatpreprocessed}->[0] = 1;
        my $hyps1 = $sn->{$snkey};
        my @moreinfo = ();
        my @nodes    = ();
        foreach (@$hyps1) {    #This preserves the constructed data to avoid redundant table lookup
            my $cfgdata;
            if ($hyp_hash{$_}{nodes}) {
                push @nodes, @{ $hyp_hash{$_}{nodes} };
                $cfgdata = "[$_][" . join(',', @{ $hyp_hash{$_}{nodes} }) . "][$username][$password][$vusername][$vpassword]";    #TODO: not use vm.host?
            } else {
                $cfgdata = "[$_][][$username][$password][$vusername][$vpassword]";    #TODO: not use vm.host?
            }
            if (defined $hyptabhash->{$_}->[0]->{mgr}) {
                $cfgdata .= "[" . $hyptabhash->{$_}->[0]->{mgr} . "]";
            } else {
                $cfgdata .= "[]";
            }
            push @moreinfo, $cfgdata;    #"[$_][".join(',',@{$hyp_hash{$_}{nodes}})."][$username][$password]";
        }
        $reqcopy->{node} = \@nodes;
        #print "nodes=@nodes\n";
        $reqcopy->{moreinfo} = \@moreinfo;
        push @requests, $reqcopy;
    }
    return \@requests;
}
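
#Entry point on the service node: establish a connection to each hypervisor
#(directly, or through its managing vCenter when required), wait until every
#hypervisor is ready, dispatch the command via do_cmd, then log out.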
sub process_request {
    #$SIG{INT} = $SIG{TERM} = sub{
    #    foreach (keys %esx_comm_pids){
    #        kill 2,$_;
    #    }
    #    exit 0;
    #};
    my $request = shift;
    $output_handler = shift;
    $executerequest = shift;
    if ($request->{_xcat_authname}->[0]) {
        $requester = $request->{_xcat_authname}->[0];
    }
    my $level    = shift;
    my $distname = undef;
    my $arch     = undef;
    my $path     = undef;
    my $command  = $request->{command}->[0];

    #The first segment is fulfilling the role of this plugin as
    #a hypervisor provisioning plugin (akin to anaconda, windows, sles plugins)
    if ($command eq 'copycd') {
        return copycd($request, $executerequest);
    } elsif ($command eq 'mknetboot') {
        return mknetboot($request, $executerequest);
    }

    #From here on out, code for managing guests under VMware
    #Detect whether or not the VMware SDK is available on this specific system
    unless ($vmwaresdkdetect) {
        $vmwaresdkdetect = eval {
            require VMware::VIRuntime;
            VMware::VIRuntime->import();
            1;
        };
    }
    unless ($vmwaresdkdetect) {
        xCAT::SvrUtils::sendmsg([ 1, "VMware SDK required for operation, but not installed" ], $output_handler);
        return;
    }
    my $moreinfo;
    my $noderange = $request->{node};
    xCAT::VMCommon::grab_table_data($noderange, \%tablecfg, $output_handler);
    my @exargs;
    unless ($command) {
        return;    # Empty request
    }
    if (ref($request->{arg})) {
        @exargs = @{ $request->{arg} };
    } else {
        @exargs = ($request->{arg});
    }
    my $sitetab = xCAT::Table->new('site');
    if ($sitetab) {
        (my $ref) = $sitetab->getAttribs({ key => 'usehostnamesforvcenter' }, 'value');
        if ($ref and $ref->{value}) {
            $usehostnamesforvcenter = $ref->{value};
        }
    }
    if ($request->{moreinfo}) { $moreinfo = $request->{moreinfo}; }
    else { $moreinfo = build_more_info($noderange, $output_handler); }
    foreach my $info (@$moreinfo) {
        $info =~ /^\[(.*?)\]\[(.*?)\]\[(.*?)\]\[(.*?)\]\[(.*?)\]\[(.*?)\]\[(.*?)\]/;
        my $hyp      = $1;
        my @nodes    = split(',', $2);
        my $username = $3;
        my $password = $4;
        $hyphash{$hyp}->{vcenter}->{name}     = $7;
        $hyphash{$hyp}->{vcenter}->{username} = $5;
        $hyphash{$hyp}->{vcenter}->{password} = $6;
        $hyphash{$hyp}->{username} = $username;    # $nodeid;
        $hyphash{$hyp}->{password} = $password;    # $nodeid;
        unless ($hyphash{$hyp}->{vcenter}->{password}) {
            $hyphash{$hyp}->{vcenter}->{password} = "";
        }
        my $ent;
        for (my $i = 0; $i < @nodes; $i++) {
            if ($command eq 'rmigrate' and grep /-f/, @exargs) {    #offline migration
                $hyphash{$hyp}->{offline} = 1;    #if it is migrate and it has nodes, it is a source hypervisor apt to be offline
                    #this will hint to relevant code to operate under the assumption of a
                    #downed hypervisor source
                    #note this will make dangerous assumptions, it will make a very minimal attempt
                    #to operate normally, but really should only be called if the source is down and
                    #fenced (i.e. storage, network, or turned off and stateless)
            }
            my $node = $nodes[$i];
            #my $nodeid = $ids[$i];
            $hyphash{$hyp}->{nodes}->{$node} = 1;    # $nodeid;
        }
    }
    my $hyptab = xCAT::Table->new('hypervisor', create => 0);
    if ($hyptab) {
        my @hyps = keys %hyphash;
        $tablecfg{hypervisor} = $hyptab->getNodesAttribs(\@hyps, [ 'mgr', 'netmap', 'defaultnet', 'cluster', 'preferdirect' ]);
    }
    my $hoststab = xCAT::Table->new('hosts', create => 0);
    if ($hoststab) {
        my @hyps = keys %hyphash;
        $tablecfg{hosts} = $hoststab->getNodesAttribs(\@hyps, ['hostnames']);
    }

    #my $children = 0;
    #my $vmmaxp = 84;
    #$SIG{CHLD} = sub { my $cpid; while ($cpid = waitpid(-1, WNOHANG) > 0) { delete $esx_comm_pids{$cpid}; $children--; } };
    $viavcenter = 0;
    if ($command eq 'rmigrate' or $command eq 'rmhypervisor') {    #Only use vcenter when required, fewer prereqs
        $viavcenter = 1;
    }
    my $keytab = xCAT::Table->new('prodkey');
    if ($keytab) {
        my @hypes = keys %hyphash;
        $tablecfg{prodkey} = $keytab->getNodesAttribs(\@hypes, [qw/product key/]);
    }
    my $hyp;
    my %needvcentervalidation;
    foreach $hyp (sort (keys %hyphash)) {
        #if($pid == 0){
        if ($viavcenter or (defined $tablecfg{hypervisor}->{$hyp}->[0]->{mgr} and not $tablecfg{hypervisor}->{$hyp}->[0]->{preferdirect})) {
            $viavcenterbyhyp->{$hyp} = 1;
            $hypready{$hyp} = 0;    #This hypervisor requires a flag be set to signify vCenter saneness before proceeding
            my $vcenter = $hyphash{$hyp}->{vcenter}->{name};
            unless ($vcenterhash{$vcenter}->{conn}) {
                eval {
                    $vcenterhash{$vcenter}->{conn} =
                      Vim->new(service_url => "https://$vcenter/sdk");
                    $vcenterhash{$vcenter}->{conn}->login(
                        user_name => $hyphash{$hyp}->{vcenter}->{username},
                        password  => $hyphash{$hyp}->{vcenter}->{password}
                    );
                };
                if ($@) {
                    $vcenterhash{$vcenter}->{conn} = undef;
                    xCAT::SvrUtils::sendmsg([ 1, "Unable to reach $vcenter vCenter server to manage $hyp: $@" ], $output_handler);
                    next;
                }
            }
            $hyphash{$hyp}->{conn} = $vcenterhash{ $hyphash{$hyp}->{vcenter}->{name} }->{conn};
            $hyphash{$hyp}->{vcenter}->{conn} = $vcenterhash{ $hyphash{$hyp}->{vcenter}->{name} }->{conn};
            $needvcentervalidation{$hyp} = $vcenter;
            $vcenterhash{$vcenter}->{allhyps}->{$hyp} = 1;
        } else {
            eval {
                $hyphash{$hyp}->{conn} = Vim->new(service_url => "https://$hyp/sdk");
                $hyphash{$hyp}->{conn}->login(user_name => $hyphash{$hyp}->{username}, password => $hyphash{$hyp}->{password});
            };
            if ($@) {
                $hyphash{$hyp}->{conn} = undef;
                xCAT::SvrUtils::sendmsg([ 1, "Unable to reach $hyp to perform operation" ], $output_handler);
                $hypready{$hyp} = -1;
                next;
            }
            validate_licenses($hyp);
        }
        #}else{
        #    $esx_comm_pids{$pid} = 1;
        #}
    }
    foreach $hyp (keys %needvcentervalidation) {
        my $vcenter = $needvcentervalidation{$hyp};
        if (not defined $vcenterhash{$vcenter}->{hostviews}) {
            populate_vcenter_hostviews($vcenter);
        }
        if (validate_vcenter_prereqs($hyp, \&declare_ready, {
                    hyp     => $hyp,
                    vcenter => $vcenter
                }) eq "failed") {
            $hypready{$hyp} = -1;
        }
    }
    while (grep { $_ == 0 } values %hypready) {
        wait_for_tasks();
        sleep(1);    #We'll check back in every second.  Unfortunately, we have to poll since we are in web service land
    }
    my @badhypes;
    if (grep { $_ == -1 } values %hypready) {
        foreach (keys %hypready) {
            if ($hypready{$_} == -1) {
                push @badhypes, $_;
                my @relevant_nodes = sort (keys %{ $hyphash{$_}->{nodes} });
                foreach (@relevant_nodes) {
                    xCAT::SvrUtils::sendmsg([ 1, ": hypervisor unreachable" ], $output_handler, $_);
                }
                delete $hyphash{$_};
            }
        }
        if (@badhypes) {
            xCAT::SvrUtils::sendmsg([ 1, ": The following hypervisors failed to become ready for the operation: " . join(',', @badhypes) ], $output_handler);
        }
    }
    do_cmd($command, @exargs);
    foreach (@badhypes) { delete $hyphash{$_}; }
    foreach my $hyp (sort (keys %hyphash)) {
        $hyphash{$hyp}->{conn}->logout();
    }
}
sub validate_licenses {
    my $hyp  = shift;
    my $conn = $hyphash{$hyp}->{conn};
    unless ($tablecfg{prodkey}->{$hyp}) {    #if no license specified, no-op
        return;
    }
    my $hv = get_hostview(hypname => $hyp, conn => $conn, properties => [ 'configManager', 'name' ]);
    my $lm = $conn->get_view(mo_ref => $hv->configManager->licenseManager);
    my @licenses;
    foreach (@{ $lm->licenses }) {
        push @licenses, uc($_->licenseKey);
    }
    my @newlicenses;
    foreach (@{ $tablecfg{prodkey}->{$hyp} }) {
        if (defined($_->{product}) and $_->{product} eq 'esx') {
            my $key = uc($_->{key});
            unless (grep /$key/, @licenses) {
                push @newlicenses, $key;
            }
        }
    }
    foreach (@newlicenses) {
        $lm->UpdateLicense(licenseKey => $_);
    }
}
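
#Dispatch the xCAT command: per-VM commands go through generic_vm_operation
#with the VirtualMachine properties each handler needs, per-hypervisor commands
#go through generic_hyp_operation, and wait_for_tasks then blocks until every
#submitted vSphere task has completed.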
sub do_cmd {
    my $command = shift;
    my @exargs  = @_;
    if ($command eq 'rpower') {
        generic_vm_operation([ 'config.name', 'config.guestId', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'runtime.powerState', 'runtime.host' ], \&power, @exargs);
    } elsif ($command eq 'rmvm') {
        generic_vm_operation([ 'config.name', 'runtime.powerState', 'runtime.host' ], \&rmvm, @exargs);
    } elsif ($command eq 'rsetboot') {
        generic_vm_operation([ 'config.name', 'runtime.host' ], \&setboot, @exargs);
    } elsif ($command eq 'rinv') {
        generic_vm_operation([ 'config.name', 'config', 'runtime.host' ], \&inv, @exargs);
    } elsif ($command eq 'rmhypervisor') {
        generic_hyp_operation(\&rmhypervisor, @exargs);
    } elsif ($command eq 'lsvm') {
        generic_hyp_operation(\&lsvm, @exargs);
    } elsif ($command eq 'clonevm') {
        generic_hyp_operation(\&clonevms, @exargs);
    } elsif ($command eq 'mkvm') {
        generic_hyp_operation(\&mkvms, @exargs);
    } elsif ($command eq 'chvm') {
        generic_vm_operation([ 'config.name', 'config', 'runtime.host' ], \&chvm, @exargs);
        #generic_hyp_operation(\&chvm,@exargs);
    } elsif ($command eq 'rmigrate') {    #Technically, on a host view, but vcenter path is 'weirder'
        generic_hyp_operation(\&migrate, @exargs);
    }
    wait_for_tasks();
}
#inventory request for esx
sub inv {
    my %args = @_;
    my $node = $args{node};
    my $hyp  = $args{hyp};
    if (not defined $args{vmview}) {    #attempt one refresh
        $args{vmview} = $hyphash{$hyp}->{conn}->find_entity_view(view_type => 'VirtualMachine', properties => [ 'config.name', 'runtime.powerState' ], filter => { name => $node });
        if (not defined $args{vmview}) {
            xCAT::SvrUtils::sendmsg([ 1, "VM does not appear to exist" ], $output_handler, $node);
            return;
        }
    }
    my $vmview = $args{vmview};
    my $uuid = $vmview->config->uuid;
    xCAT::SvrUtils::sendmsg("UUID/GUID: $uuid", $output_handler, $node);
    my $cpuCount = $vmview->config->hardware->numCPU;
    xCAT::SvrUtils::sendmsg("CPUs: $cpuCount", $output_handler, $node);
    my $memory = $vmview->config->hardware->memoryMB;
    xCAT::SvrUtils::sendmsg("Memory: $memory MB", $output_handler, $node);
    my $devices = $vmview->config->hardware->device;
    my $label;
    my $size;
    my $fileName;
    my $device;
    foreach $device (@$devices) {
        $label = $device->deviceInfo->label;
        if ($label =~ /^Hard disk/) {
            $label .= " (d" . $device->controllerKey . ":" . $device->unitNumber . ")";
            $size     = $device->capacityInKB / 1024;
            $fileName = $device->backing->fileName;
            $output_handler->({
                    node => {
                        name => $node,
                        data => {
                            desc     => $label,
                            contents => "$size MB @ $fileName"
                        }
                    }
            });
        } elsif ($label =~ /Network/) {
            xCAT::SvrUtils::sendmsg("$label: " . $device->macAddress, $output_handler, $node);
        }
    }
}
#changes the memory, number of cpus and device size
#can also add, resize and remove disks
sub chvm {
    my %args = @_;
    my $node = $args{node};
    my $hyp  = $args{hyp};
    if (not defined $args{vmview}) {    #attempt one refresh
        $args{vmview} = $hyphash{$hyp}->{conn}->find_entity_view(view_type => 'VirtualMachine',
            properties => [ 'config.name', 'runtime.powerState' ],
            filter     => { name => $node });
        if (not defined $args{vmview}) {
            xCAT::SvrUtils::sendmsg([ 1, "VM does not appear to exist" ], $output_handler, $node);
            return;
        }
    }
    @ARGV = @{ $args{exargs} };
    my @deregister;
    my @purge;
    my @add;
    my %resize;
    my $cpuCount;
    my $memory;
    my $vmview = $args{vmview};
    require Getopt::Long;
    $SIG{__WARN__} = sub {
        xCAT::SvrUtils::sendmsg([ 1, "Could not parse options, " . shift() ], $output_handler);
    };
    my $rc = GetOptions(
        "d=s"       => \@deregister,
        "p=s"       => \@purge,
        "a=s"       => \@add,
        "resize=s%" => \%resize,
        "cpus=s"    => \$cpuCount,
        "mem=s"     => \$memory
    );
    $SIG{__WARN__} = 'DEFAULT';
    if (@ARGV) {
        xCAT::SvrUtils::sendmsg("Invalid arguments: @ARGV", $output_handler);
        return;
    }
    if (!$rc) {
        return;
    }
    #use Data::Dumper;
    #xCAT::SvrUtils::sendmsg("dereg = ".Dumper(\@deregister));
    #xCAT::SvrUtils::sendmsg("purge = ".Dumper(\@purge));
    #xCAT::SvrUtils::sendmsg("add = ".Dumper(\@add));
    #xCAT::SvrUtils::sendmsg("resize = ".Dumper(\%resize));
    #xCAT::SvrUtils::sendmsg("cpus = $cpuCount");
    #xCAT::SvrUtils::sendmsg("mem = ".getUnits($memory,"K",1024));

    my %conargs;
    if ($cpuCount) {
        if ($cpuCount =~ /^\+(\d+)/) {
            $cpuCount = $vmview->config->hardware->numCPU + $1;
        } elsif ($cpuCount =~ /^-(\d+)/) {
            $cpuCount = $vmview->config->hardware->numCPU - $1;
        }
        $conargs{numCPUs} = $cpuCount;
    }
    if ($memory) {
        if ($memory =~ /^\+(.+)/) {
            $conargs{memoryMB} = $vmview->config->hardware->memoryMB + getUnits($1, "M", 1048576);
        } elsif ($memory =~ /^-(\d+)/) {
            $conargs{memoryMB} = $vmview->config->hardware->memoryMB - getUnits($1, "M", 1048576);
        } else {
            $conargs{memoryMB} = getUnits($memory, "M", 1048576);
        }
    }
    my $disk;
    my $devices = $vmview->config->hardware->device;
    my $label;
    my $device;
    my $cmdLabel;
    my $newSize;
    my @devChanges;
    if (@deregister) {
        for $disk (@deregister) {
            $device = getDiskByLabel($disk, $devices);
            unless ($device) {
                xCAT::SvrUtils::sendmsg([ 1, "Disk: $disk does not exist" ], $output_handler, $node);
                return;
            }
            #xCAT::SvrUtils::sendmsg(Dumper($device));
            push @devChanges, VirtualDeviceConfigSpec->new(
                device    => $device,
                operation => VirtualDeviceConfigSpecOperation->new('remove'));
        }
    }
    if (@purge) {
        for $disk (@purge) {
            $device = getDiskByLabel($disk, $devices);
            unless ($device) {
                xCAT::SvrUtils::sendmsg([ 1, "Disk: $disk does not exist" ], $output_handler, $node);
                return;
            }
            #xCAT::SvrUtils::sendmsg(Dumper($device));
            push @devChanges, VirtualDeviceConfigSpec->new(
                device        => $device,
                operation     => VirtualDeviceConfigSpecOperation->new('remove'),
                fileOperation => VirtualDeviceConfigSpecFileOperation->new('destroy'));
        }
    }
    if (@add) {
        my $addSizes = join(',', @add);
        my $scsiCont;
        my $scsiUnit;
        my $ideCont;
        my $ideUnit;
        my $label;
        my $idefull  = 0;
        my $scsifull = 0;
        foreach $device (@$devices) {
            $label = $device->deviceInfo->label;
            if ($label =~ /^SCSI controller/) {
                my $tmpu = getAvailUnit($device->{key}, $devices, maxnum => 15);
                if ($tmpu > 0) {
                    $scsiCont = $device;
                    $scsiUnit = $tmpu;
                } else {
                    $scsifull = 1;
                }
                #ignore scsiControllers that are full, problem still remains if trying to add across two controllers in one go
            }
            if ($label =~ /^IDE/ and not $ideCont) {
                my $tmpu = getAvailUnit($device->{key}, $devices, maxnum => 1);
                print "$tmpu for " . $device->{key} . "\n";
                if ($tmpu >= 0) {
                    $ideCont = $device;
                    $ideUnit = $tmpu;
                } elsif ($device->{key} == 201) {
                    $idefull = 1;
                }
            }
        }
        unless ($hyphash{$hyp}->{datastoremap}) { validate_datastore_prereqs([], $hyp); }
        push @devChanges, create_storage_devs($node, $hyphash{$hyp}->{datastoremap}, $addSizes, $scsiCont, $scsiUnit, $ideCont, $ideUnit, $devices, idefull => $idefull, scsifull => $scsifull);
    }
    if (%resize) {
        while (my ($key, $value) = each(%resize)) {
            my @drives = split(/,/, $key);
            for my $device (@drives) {
                my $disk = $device;
                $device = getDiskByLabel($disk, $devices);
                unless ($device) {
                    xCAT::SvrUtils::sendmsg([ 1, "Disk: $disk does not exist" ], $output_handler, $node);
                    return;
                }
                if ($value =~ /^\+(.+)/) {
                    $value = $device->capacityInKB + getUnits($1, "G", 1024);
                } else {
                    $value = getUnits($value, "G", 1024);
                }
                my $newDevice = VirtualDisk->new(
                    deviceInfo    => $device->deviceInfo,
                    key           => $device->key,
                    controllerKey => $device->controllerKey,
                    unitNumber    => $device->unitNumber,
                    backing       => $device->backing,
                    capacityInKB  => $value);
                push @devChanges, VirtualDeviceConfigSpec->new(
                    device    => $newDevice,
                    operation => VirtualDeviceConfigSpecOperation->new('edit'));
            }
        }
    }
    if (@devChanges) {
        $conargs{deviceChange} = \@devChanges;
    }
    my $reconfigspec = VirtualMachineConfigSpec->new(%conargs);
    #xCAT::SvrUtils::sendmsg("reconfigspec = ".Dumper($reconfigspec));
    my $task = $vmview->ReconfigVM_Task(spec => $reconfigspec);
    $running_tasks{$task}->{task}     = $task;
    $running_tasks{$task}->{callback} = \&generic_task_callback;
    $running_tasks{$task}->{hyp}      = $hyp;
    $running_tasks{$task}->{data} = { node => $node, successtext => "node successfully changed" };
}
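
#getUsedUnits returns a hash of unit numbers already occupied on the given
#controller; getAvailUnit picks the lowest free unit and returns -1 when the
#optional maxnum limit is reached.  Unit 7 is always treated as occupied,
#presumably because it is the controller's own SCSI ID.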
sub getUsedUnits {
    my $contKey = shift;
    my $devices = shift;
    my %usedids;
    $usedids{7}   = 1;
    $usedids{'7'} = 1;    #TODO: figure out which of these is redundant, the string or the number variant
    for my $device (@$devices) {
        if ($device->{controllerKey} eq $contKey) {
            $usedids{ $device->{unitNumber} } = 1;
        }
    }
    return \%usedids;
}

sub getAvailUnit {
    my $contKey = shift;
    my $devices = shift;
    my %args    = @_;
    my $maxunit = -1;
    if (defined $args{maxnum}) {
        $maxunit = $args{maxnum};
    }
    my %usedids;
    $usedids{7}   = 1;
    $usedids{'7'} = 1;    #TODO: figure out which of these is redundant, the string or the number variant
    for my $device (@$devices) {
        if ($device->{controllerKey} eq $contKey) {
            $usedids{ $device->{unitNumber} } = 1;
        }
    }
    my $highestUnit = 0;
    while ($usedids{$highestUnit}) {
        if ($highestUnit == $maxunit) {
            return -1;
        }
        $highestUnit++;
    }
    return $highestUnit;
}
#given a device list from a vm and a label for a hard disk, returns the device object
sub getDiskByLabel {
    my $cmdLabel = shift;
    my $devices  = shift;
    my $device;
    my $label;
    $cmdLabel = commandLabel($cmdLabel);
    foreach $device (@$devices) {
        $label = $device->deviceInfo->label;
        if ($cmdLabel eq $label) {
            return $device;
        } elsif (($label =~ /^Hard disk/) and ($cmdLabel =~ /^d(.*)/)) {
            my $desc = $1;
            if ($desc =~ /(.*):(.*)/) {    #specific
                my $controller = $1;
                my $unit       = $2;
                if ($device->unitNumber == $unit and $device->controllerKey == $controller) {
                    return $device;
                }
            } elsif ($desc =~ /\d+/ and $device->unitNumber == $desc) {    #not specific
                return $device;
            }
        }
    }
    return undef;
}

#takes a label for a hard disk and prepends "Hard disk " if it's not there already
sub commandLabel {
    my $label = shift;
    if (($label =~ /^Hard disk/) or ($label =~ /^d\d+/)) {
        return $label;
    }
    return "Hard disk " . $label;
}
#this function will check pending task status
sub process_tasks {
    foreach (keys %running_tasks) {
        my $curcon;
        if (defined $running_tasks{$_}->{conn}) {
            $curcon = $running_tasks{$_}->{conn};
        } else {
            $curcon = $hyphash{ $running_tasks{$_}->{hyp} }->{conn};
        }
        my $curt  = $curcon->get_view(mo_ref => $running_tasks{$_}->{task});
        my $state = $curt->info->state->val;
        unless ($state eq 'running' or $state eq 'queued') {
            $running_tasks{$_}->{callback}->($curt, $running_tasks{$_}->{data});
            delete $running_tasks{$_};
        }
        if ($state eq 'running' and not $running_tasks{$_}->{questionasked}) {    # and $curt->info->progress == 95) {
                #This is unfortunate, there should be a 'state' to indicate a question is blocking;
                #however there isn't, so when we see a running task we manually check whether a question blocked the rest
            my $vm;
            $@ = "";
            eval {
                $vm = $curcon->get_view(mo_ref => $curt->info->entity);
            };
            if ($@) { $vm = 0; }
            if ($vm and $vm->{summary} and $vm->summary->{runtime} and $vm->summary->runtime->{question} and $vm->summary->runtime->question) {
                $running_tasks{$_}->{questionasked} = 1;
                $running_tasks{$_}->{callback}->($curt, $running_tasks{$_}->{data}, $vm->summary->runtime->question, $vm);
            }
        }
    }
}

#this function is a barrier: it blocks until all outstanding tasks complete, ensuring prerequisites are met
sub wait_for_tasks {
    while (scalar keys %running_tasks) {
        process_tasks;
        sleep(1);    #We'll check back in every second.  Unfortunately, we have to poll since we are in web service land
    }
}
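
#Callback for AddStandaloneHost/AddHost/ReconnectHost tasks: on success mark
#the hypervisor ready and enable vMotion; on an SSL thumbprint fault, retry the
#same request accepting the presented certificate; on any other error mark the
#hypervisor permanently not ready.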
sub connecthost_callback {
    my $task  = shift;
    my $args  = shift;
    my $hv    = $args->{hostview};
    my $state = $task->info->state->val;
    if ($state eq "success") {
        $hypready{ $args->{hypname} } = 1;    #declare readiness
        enable_vmotion(hypname => $args->{hypname}, hostview => $args->{hostview}, conn => $args->{conn});
        $vcenterhash{ $args->{vcenter} }->{goodhyps}->{ $args->{hypname} } = 1;
        if (defined $args->{depfun}) {    #If a function is waiting for the host connect to go valid, call it
            $args->{depfun}->($args->{depargs});
        }
        return;
    }
    my $thumbprint;
    eval {
        $thumbprint = $task->{info}->error->fault->thumbprint;
    };
    if ($thumbprint) {    #was an unknown certificate error, retry and accept the unknown certificate
        $args->{connspec}->{sslThumbprint} = $task->info->error->fault->thumbprint;
        my $task;
        if (defined $args->{hostview}) {    #It was a reconnect request
            $task = $hv->ReconnectHost_Task(cnxSpec => $args->{connspec});
        } elsif (defined $args->{foldview}) {    #was an add host request
            $task = $args->{foldview}->AddStandaloneHost_Task(spec => $args->{connspec}, addConnected => 1);
        } elsif (defined $args->{cluster}) {    #was an add host to cluster request
            $task = $args->{cluster}->AddHost_Task(spec => $args->{connspec}, asConnected => 1);
        }
        $running_tasks{$task}->{task}     = $task;
        $running_tasks{$task}->{callback} = \&connecthost_callback;
        $running_tasks{$task}->{conn}     = $args->{conn};
        $running_tasks{$task}->{data}     = $args;    #{ conn_spec=>$connspec,hostview=>$hv,hypname=>$args->{hypname},vcenter=>$args->{vcenter} };
    } elsif ($state eq 'error') {
        my $error = $task->info->error->localizedMessage;
        if (defined($task->info->error->fault->faultMessage)) {    #Only in 4.0, support of 3.5 must be careful?
            foreach (@{ $task->info->error->fault->faultMessage }) {
                $error .= $_->message;
            }
        }
        xCAT::SvrUtils::sendmsg([ 1, $error ], $output_handler);    #,$node);
        $hypready{ $args->{hypname} } = -1;    #Impossible for this hypervisor to ever be ready
        $vcenterhash{ $args->{vcenter} }->{badhyps}->{ $args->{hypname} } = 1;
    }
}
sub get_clusterview {
    my %args      = @_;
    my $clustname = $args{clustname};
    my %subargs   = (
        view_type => 'ClusterComputeResource',
    );
    if ($args{properties}) {
        $subargs{properties} = $args{properties};
    }
    $subargs{filter} = { name => $clustname };
    my $view = $args{conn}->find_entity_view(%subargs);
    return $view;
    #foreach (@{$args{conn}->find_entity_views(%subargs)}) {
    #    if ($_->name eq "$clustname") {
    #        return $_;
    #        last;
    #    }
    #}
}
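
#Look up the HostSystem view for a hypervisor.  The host may be registered in
#vCenter under its short name, FQDN, IP address, an alias, or 'localhost', so
#the name is resolved and each candidate is tried with an anchored regex filter.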
sub get_hostview {
    my %args    = @_;
    my $host    = $args{hypname};
    my %subargs = (
        view_type => 'HostSystem',
    );
    if ($args{properties}) {
        $subargs{properties} = $args{properties};
    }
    my @addrs = gethostbyname($host);
    my $ip;
    my $name;
    my $aliases;
    if ($addrs[4]) {
        $ip = inet_ntoa($addrs[4]);
        ($name, $aliases) = gethostbyaddr($addrs[4], AF_INET);    #TODO: IPv6
    } else {
        ($ip, $name, $aliases) = ($host, $host, "");
    }
    my @matchvalues = ($host, $ip, $name);
    foreach (split /\s+/, $aliases) {
        push @matchvalues, $_;
    }
    my $view;
    $subargs{filter} = { 'name' => qr/$host(?:\.|\z)/ };
    $view = $args{conn}->find_entity_view(%subargs);
    if ($view) { return $view; }
    foreach (@matchvalues) {
        $subargs{filter} = { 'name' => qr/$_(?:\.|\z)/ };
        $view = $args{conn}->find_entity_view(%subargs);
        if ($view) { return $view; }
    }
    $subargs{filter} = { 'name' => qr/localhost(?:\.|\z)/ };
    $view = $args{conn}->find_entity_view(%subargs);
    if ($view) { return $view; }
    return undef;    #rest of function should be obsoleted, going to run with that assumption for 2.5 at least
    #    $subargs{filter}={'name' =~ qr/.*/};
    #    foreach (@{$args{conn}->find_entity_views(%subargs)}) {
    #        my $view = $_;
    #        if ($_->name =~ /$host(?:\.|\z)/ or $_->name =~ /localhost(?:\.|\z)/ or grep { $view->name =~ /$_(?:\.|\z)/ } @matchvalues) {
    #            return $view;
    #            last;
    #        }
    #    }
}
sub enable_vmotion {
    #TODO: vmware 3.x semantics too?  this is 4.0...
    my %args = @_;
    unless ($args{hostview}) {
        $args{hostview} = get_hostview(conn => $args{conn}, hypname => $args{hypname}, properties => [ 'configManager', 'name' ]);
    }
    my $nicmgr = $args{conn}->get_view(mo_ref => $args{hostview}->configManager->virtualNicManager);
    my $qnc = $nicmgr->QueryNetConfig(nicType => "vmotion");
    if ($qnc->{selectedVnic}) {
        return 1;
    } else {
        if (scalar @{ $qnc->candidateVnic } eq 1) {    #There is only one possible path, use it
            $nicmgr->SelectVnicForNicType(nicType => "vmotion", device => $qnc->candidateVnic->[0]->device);
            return 1;
        } else {
            xCAT::SvrUtils::sendmsg([ 1, "TODO: use configuration to pick the nic " . $args{hypname} ], $output_handler);
        }
        return 0;
    }
}
sub mkvm_callback {
    my $task = shift;
    my $args = shift;
    my $node = $args->{node};
    my $hyp  = $args->{hyp};
    if ($task->info->state->val eq 'error') {
        my $error = $task->info->error->localizedMessage;
        xCAT::SvrUtils::sendmsg([ 1, $error ], $output_handler, $node);
    }
}

sub relay_vmware_err {
    my $task      = shift;
    my $extratext = shift;
    my @nodes     = @_;
    my $error = $task->info->error->localizedMessage;
    if (defined($task->info->error->fault->faultMessage)) {    #Only in 4.0, support of 3.5 must be careful?
        foreach (@{ $task->info->error->fault->faultMessage }) {
            $error .= $_->message;
        }
    }
    if (@nodes) {
        foreach (@nodes) {
            xCAT::SvrUtils::sendmsg([ 1, $extratext . $error ], $output_handler, $_);
        }
    } else {
        xCAT::SvrUtils::sendmsg([ 1, $extratext . $error ], $output_handler);
    }
}
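
#Callback for storage relocation: on success, record the new datastore in
#vm.storage (preserving any "=<model>" suffix from the previous value);
#otherwise relay the VMware error back to the requester.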
sub relocate_callback {
    my $task  = shift;
    my $parms = shift;
    my $state = $task->info->state->val;
    if ($state eq 'success') {
        my $vmtab = xCAT::Table->new('vm');    #TODO: update vm.storage?
        my $prevloc = $tablecfg{vm}->{ $parms->{node} }->[0]->{storage};
        my $model;
        ($prevloc, $model) = split /=/, $prevloc;
        my $target = $parms->{target};
        if ($model) {
            $target .= "=$model";
        }
        $vmtab->setNodeAttribs($parms->{node}, { storage => $target });
        xCAT::SvrUtils::sendmsg(":relocated to " . $parms->{target}, $output_handler, $parms->{node});
    } else {
        relay_vmware_err($task, "Relocating to " . $parms->{target} . " ", $parms->{node});
    }
}

sub migrate_ok {    #look like a successful migrate, callback for registering a vm
    my %args  = @_;
    my $vmtab = xCAT::Table->new('vm');
    $vmtab->setNodeAttribs($args{nodes}->[0], { host => $args{target} });
    xCAT::SvrUtils::sendmsg("migrated to " . $args{target}, $output_handler, $args{nodes}->[0]);
}
sub migrate_callback {
    my $task  = shift;
    my $parms = shift;
    my $state = $task->info->state->val;
    if (not $parms->{skiptodeadsource} and $state eq 'success') {
        my $vmtab = xCAT::Table->new('vm');
        $vmtab->setNodeAttribs($parms->{node}, { host => $parms->{target} });
        xCAT::SvrUtils::sendmsg("migrated to " . $parms->{target}, $output_handler, $parms->{node});
    } elsif ($parms->{offline}) {    #try a forceful RegisterVM instead
        my $target = $parms->{target};
        my $hostview = $hyphash{$target}->{conn}->find_entity_view(view_type => 'VirtualMachine', properties => ['config.name'], filter => { name => $parms->{node} });
        if ($hostview) {    #this means vcenter still has it in inventory, but on a dead node...
                #unfortunately, vcenter won't give up the old one until we zap the dead hypervisor
                #also unfortunately, it doesn't make it easy to find said hypervisor..
            $hostview = $hyphash{ $parms->{src} }->{conn}->get_view(mo_ref => $hyphash{ $parms->{src} }->{deletionref});
            $task = $hostview->Destroy_Task();
            $running_tasks{$task}->{task}     = $task;
            $running_tasks{$task}->{callback} = \&migrate_callback;
            $running_tasks{$task}->{conn}     = $hyphash{$target}->{vcenter}->{conn};
            $running_tasks{$task}->{data} = { offline => 1, src => $parms->{src}, node => $parms->{node}, target => $target, skiptodeadsource => 1 };
        } else {    #it is completely gone, attempt a register_vm strategy
            register_vm($target, $parms->{node}, undef, \&migrate_ok, { nodes => [ $parms->{node} ], target => $target, }, "failonerror");
        }
    } else {
        relay_vmware_err($task, "Migrating to " . $parms->{target} . " ", $parms->{node});
    }
}
sub poweron_task_callback {
    my $task  = shift;
    my $parms = shift;
    my $q     = shift;    #question if blocked
    my $vm    = shift;    #path to answer questions if asked
    my $state = $task->info->state->val;
    my $node   = $parms->{node};
    my $intent = $parms->{successtext};
    if ($state eq 'success') {
        xCAT::SvrUtils::sendmsg($intent, $output_handler, $node);
    } elsif ($state eq 'error') {
        relay_vmware_err($task, "", $node);
    } elsif ($q and $q->text =~ /^msg.uuid.altered:/ and ($q->choice->choiceInfo->[0]->summary eq 'Cancel' and ($q->choice->choiceInfo->[0]->key eq '0'))) {    #make sure it is what we have seen it to be
        if ($parms->{forceon} and $q->choice->choiceInfo->[1]->summary eq 'I (_)?moved it' and $q->choice->choiceInfo->[1]->key eq '1') {    #answer the question as 'moved'
            $vm->AnswerVM(questionId => $q->id, answerChoice => '1');
        } else {
            $vm->AnswerVM(questionId => $q->id, answerChoice => '0');
            xCAT::SvrUtils::sendmsg([ 1, "Failure powering on VM, it mismatched against the hypervisor.  If positive VM is not running on another hypervisor, use -f to force VM on" ], $output_handler, $node);
        }
    } elsif ($q) {
        if ($q->choice->choiceInfo->[0]->summary eq 'Cancel') {
            xCAT::SvrUtils::sendmsg([ 1, ":Cancelling due to unexpected question executing task: " . $q->text ], $output_handler, $node);
        } else {
            xCAT::SvrUtils::sendmsg([ 1, ":Task hung due to unexpected question executing task, need to use VMware tools to clean up the mess for now: " . $q->text ], $output_handler, $node);
        }
    }
}

sub generic_task_callback {
    my $task  = shift;
    my $parms = shift;
    my $state = $task->info->state->val;
    my $node   = $parms->{node};
    my $intent = $parms->{successtext};
    if ($state eq 'success') {
        xCAT::SvrUtils::sendmsg($intent, $output_handler, $node);
    } elsif ($state eq 'error') {
        relay_vmware_err($task, "", $node);
    }
}
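
#rmigrate handler.  With -s <datastore> it performs a storage-only relocation
#of each VM via RelocateVM_Task; otherwise it migrates the VMs to the named
#target hypervisor through vCenter (MigrateVM_Task) after checking datastore
#and network prerequisites on the destination.  -f allows recovery from a dead
#(offline) source by re-registering the VMs on the target.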
sub migrate {
    my %args  = @_;
    my @nodes = @{ $args{nodes} };
    my $hyp   = $args{hyp};
    my $vcenter = $hyphash{$hyp}->{vcenter}->{name};
    my $datastoredest;
    my $offline;
    @ARGV = @{ $args{exargs} };
    unless (GetOptions(
            's=s' => \$datastoredest,
            'f'   => \$offline,
        )) {
        xCAT::SvrUtils::sendmsg([ 1, "Error parsing arguments" ], $output_handler);
        return;
    }
    my $target = $hyp;    #case for storage migration
    if ($datastoredest and scalar @ARGV) {
        xCAT::SvrUtils::sendmsg([ 1, "Unable to mix storage migration and processing of arguments " . join(' ', @ARGV) ], $output_handler);
        return;
    } elsif (@ARGV) {
        $target = shift @ARGV;
        if (@ARGV) {
            xCAT::SvrUtils::sendmsg([ 1, "Unrecognized arguments " . join(' ', @ARGV) ], $output_handler);
            return;
        }
    } elsif ($datastoredest) {    #storage migration only
        unless (validate_datastore_prereqs([], $hyp, { $datastoredest => \@nodes })) {
            xCAT::SvrUtils::sendmsg([ 1, "Unable to find/mount target datastore $datastoredest" ], $output_handler);
            return;
        }
        foreach (@nodes) {
            my $hostview = $hyphash{$hyp}->{conn}->find_entity_view(view_type => 'VirtualMachine', properties => ['config.name'], filter => { name => $_ });
            my $relocatspec = VirtualMachineRelocateSpec->new(
                datastore => $hyphash{$hyp}->{datastorerefmap}->{$datastoredest},
            );
            my $task = $hostview->RelocateVM_Task(spec => $relocatspec);
            $running_tasks{$task}->{task}     = $task;
            $running_tasks{$task}->{callback} = \&relocate_callback;
            $running_tasks{$task}->{hyp}      = $args{hyp};
            $running_tasks{$task}->{data} = { node => $_, target => $datastoredest };
            process_tasks;    #check for tasks needing followup actions before the task is forgotten (VMware's memory is fairly short at times)
        }
        return;
    }
    if ((not $offline and $vcenterhash{$vcenter}->{badhyps}->{$hyp}) or $vcenterhash{$vcenter}->{badhyps}->{$target}) {
        xCAT::SvrUtils::sendmsg([ 1, "Unable to migrate " . join(',', @nodes) . " to $target due to inability to validate vCenter connectivity" ], $output_handler);
        return;
    }
    if (($offline or $vcenterhash{$vcenter}->{goodhyps}->{$hyp}) and $vcenterhash{$vcenter}->{goodhyps}->{$target}) {
        unless (validate_datastore_prereqs(\@nodes, $target)) {
            xCAT::SvrUtils::sendmsg([ 1, "Unable to verify storage state on target system" ], $output_handler);
            return;
        }
        unless (validate_network_prereqs(\@nodes, $target)) {
            xCAT::SvrUtils::sendmsg([ 1, "Unable to verify target network state" ], $output_handler);
            return;
        }
        my $dstview = get_hostview(conn => $hyphash{$target}->{conn}, hypname => $target, properties => [ 'name', 'parent' ]);
        unless ($hyphash{$target}->{pool}) {
            $hyphash{$target}->{pool} = $hyphash{$target}->{conn}->get_view(mo_ref => $dstview->parent, properties => ['resourcePool'])->resourcePool;
        }
        foreach (@nodes) {
            process_tasks;    #check for tasks needing followup actions before the task is forgotten (VMware's memory is fairly short at times)
            my $srcview = $hyphash{$target}->{conn}->find_entity_view(view_type => 'VirtualMachine', properties => ['config.name'], filter => { name => $_ });
            if ($offline and not $srcview) {    #we have a request to resurrect the dead..
                register_vm($target, $_, undef, \&migrate_ok, { nodes => [$_], exargs => $args{exargs}, target => $target, hyp => $args{hyp}, offline => $offline }, "failonerror");
                return;
            } elsif (not $srcview) {
                $srcview = $hyphash{$hyp}->{conn}->find_entity_view(view_type => 'VirtualMachine', properties => ['config.name'], filter => { name => $_ });
            }
            unless ($srcview) {
                xCAT::SvrUtils::sendmsg([ 1, "Unable to locate node in vCenter" ], $output_handler, $_);
                next;
            }
            my $task = $srcview->MigrateVM_Task(
                host     => $dstview,
                pool     => $hyphash{$target}->{pool},
                priority => VirtualMachineMovePriority->new('highPriority'));
            $running_tasks{$task}->{task}     = $task;
            $running_tasks{$task}->{callback} = \&migrate_callback;
            $running_tasks{$task}->{hyp}      = $args{hyp};
            $running_tasks{$task}->{data} = { node => $_, src => $hyp, target => $target, offline => $offline };
        }
    } else {
        #xCAT::SvrUtils::sendmsg("Waiting for BOTH to be 'good'");
        return;    #One of them is still 'pending'
    }
}
sub reconfig_callback {
    my $task = shift;
    my $args = shift;
    #$args->{reconfig_args}->{vmview}->update_view_data();
    delete $args->{reconfig_args}->{vmview};    #Force a reload of the view, update_view_data seems to not work as advertised..
    $args->{reconfig_fun}->(%{ $args->{reconfig_args} });
}

sub repower {    #Called to try power again after power down for reconfig
    my $task    = shift;
    my $args    = shift;
    my $powargs = $args->{power_args};
    $powargs->{pretendop} = 1;
    #$powargs->{vmview}->update_view_data();
    delete $powargs->{vmview};    #Force a reload of the view, update_view_data seems to not work as advertised..
    power(%$powargs);
}

sub retry_rmvm {
    my $task  = shift;
    my $args  = shift;
    my $node  = $args->{node};
    my $state = $task->info->state->val;
    if ($state eq "success") {
        #$Data::Dumper::Maxdepth=2;
        delete $args->{args}->{vmview};
        rmvm(%{ $args->{args} });
    } elsif ($state eq 'error') {
        relay_vmware_err($task, "", $node);
    }
}
sub rmvm {
    my %args = @_;
    my $node = $args{node};
    my $hyp  = $args{hyp};
    if (not defined $args{vmview}) {    #attempt one refresh
        $args{vmview} = $hyphash{$hyp}->{conn}->find_entity_view(view_type => 'VirtualMachine', properties => [ 'config.name', 'runtime.powerState' ], filter => { name => $node });
        if (not defined $args{vmview}) {
            xCAT::SvrUtils::sendmsg([ 1, "VM does not appear to exist" ], $output_handler, $node);
            return;
        }
    }
    @ARGV = @{ $args{exargs} };
    require Getopt::Long;
    my $forceremove;
    my $purge;
    GetOptions(
        'f' => \$forceremove,
        'p' => \$purge,
    );
    my $task;
    unless ($args{vmview}->{'runtime.powerState'}->val eq 'poweredOff') {
        if ($forceremove) {
            $task = $args{vmview}->PowerOffVM_Task();
            $running_tasks{$task}->{task}     = $task;
            $running_tasks{$task}->{callback} = \&retry_rmvm;
            $running_tasks{$task}->{hyp}      = $args{hyp};
            $running_tasks{$task}->{data} = { node => $node, args => \%args };
            return;
        } else {
            xCAT::SvrUtils::sendmsg([ 1, "Cannot rmvm active guest (use -f argument to force)" ], $output_handler, $node);
            return;
        }
    }
    if ($purge) {
        $task = $args{vmview}->Destroy_Task();
        $running_tasks{$task}->{data} = { node => $node, successtext => 'purged' };
        $running_tasks{$task}->{task}     = $task;
        $running_tasks{$task}->{callback} = \&generic_task_callback;
        $running_tasks{$task}->{hyp}      = $args{hyp};    #$hyp_conns->{$hyp};
    } else {
        $task = $args{vmview}->UnregisterVM();
    }
}
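
#Compare the VM's current guestId, memory, and cpu count against the vm table
#and return a VirtualMachineConfigSpec describing the needed changes, or 0 if
#the current configuration already matches.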
sub getreconfigspec {
    my %args   = @_;
    my $node   = $args{node};
    my $vmview = $args{view};
    my $currid  = $args{view}->{'config.guestId'};
    my $rightid = getguestid($node);
    my %conargs;
    my $reconfigneeded = 0;
    if ($currid ne $rightid) {
        $reconfigneeded = 1;
        $conargs{guestId} = $rightid;
    }
    my $newmem;
    if ($newmem = getUnits($tablecfg{vm}->{$node}->[0]->{memory}, "M", 1048576)) {
        my $currmem = $vmview->{'config.hardware.memoryMB'};
        if ($newmem ne $currmem) {
            $conargs{memoryMB} = $newmem;
            $reconfigneeded = 1;
        }
    }
    my $newcpus;
    if ($newcpus = $tablecfg{vm}->{$node}->[0]->{cpus}) {
        my $currncpu = $vmview->{'config.hardware.numCPU'};
        if ($newcpus ne $currncpu) {
            $conargs{numCPUs} = $newcpus;
            $reconfigneeded = 1;
        }
    }
    if ($reconfigneeded) {
        return VirtualMachineConfigSpec->new(%conargs);
    } else {
        return 0;
    }
}
#This routine takes a single node, managing vmv instance, and task tracking hash to submit a power on request
sub power {
my % args = @ _ ;
my $ node = $ args { node } ;
my $ hyp = $ args { hyp } ;
my $ pretendop = $ args { pretendop } ; #to pretend a system was on for reset or boot when we have to turn it off internally for reconfig
if ( not defined $ args { vmview } ) { #attempt one refresh
$ args { vmview } = $ hyphash { $ hyp } - > { conn } - > find_entity_view ( view_type = > 'VirtualMachine' , properties = > [ 'config.name' , 'config.guestId' , 'config.hardware.memoryMB' , 'config.hardware.numCPU' , 'runtime.powerState' ] , filter = > { name = > $ node } ) ;
#vmview not existing now is not an issue, this function
#is designed to handle that and correct if reasonably possible
#comes into play particularly in a stateless context
}
@ ARGV = @ { $ args { exargs } } ; #for getoptions;
my $ forceon ;
require Getopt::Long ;
GetOptions (
'force|f' = > \ $ forceon ,
) ;
my $ subcmd = $ ARGV [ 0 ] ;
my $ intent = "" ;
my $ task ;
my $ currstat ;
if ( $ args { vmview } ) {
$ currstat = $ args { vmview } - > { 'runtime.powerState' } - > val ;
if ( grep /$subcmd/ , qw/on reset boot/ ) {
my $ reconfigspec ;
if ( $ reconfigspec = getreconfigspec ( node = > $ node , view = > $ args { vmview } ) ) {
if ( $ currstat eq 'poweredOff' ) {
#xCAT::SvrUtils::sendmsg("Correcting guestId because $currid and $rightid are not the same...");#DEBUG
my $ task = $ args { vmview } - > ReconfigVM_Task ( spec = > $ reconfigspec ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & reconfig_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ node , reconfig_fun = > \ & power , reconfig_args = > \ % args } ;
return ;
} elsif ( grep /$subcmd/ , qw/reset boot/ ) { #going to have to do a 'cycle' and present it up normally..
#xCAT::SvrUtils::sendmsg("DEBUG: forcing a cycle");
$ task = $ args { vmview } - > PowerOffVM_Task ( ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & repower ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ node , power_args = > \ % args } ;
return ; #we have to wait
}
#TODO: fixit
#xCAT::SvrUtils::sendmsg("I see vm has $currid and I want it to be $rightid");
}
}
} else {
$ currstat = 'off' ;
}
if ( $ currstat eq 'poweredOff' ) {
$ currstat = 'off' ;
} elsif ( $ currstat eq 'poweredOn' ) {
$ currstat = 'on' ;
} elsif ( $ currstat eq 'suspended' ) {
$ currstat = 'suspend' ;
}
if ( $ subcmd =~ /^stat/ ) {
xCAT::SvrUtils:: sendmsg ( $ currstat , $ output_handler , $ node ) ;
return ;
}
if ( $ subcmd =~ /boot/ ) {
$ intent = "$currstat " ;
if ( $ currstat eq 'on' or $ args { pretendop } ) {
$ intent = "on " ;
$ subcmd = 'reset' ;
} else {
$ subcmd = 'on' ;
}
}
if ( $ subcmd =~ /on/ ) {
if ( $ currstat eq 'off' or $ currstat eq 'suspend' ) {
if ( not $ args { vmview } ) { #We are asking to turn on a system the hypervisor
#doesn't know, attempt to register it first
register_vm ( $ hyp , $ node , undef , \ & power , \ % args ) ;
return ; #We'll pick it up on the retry if it gets registered
}
eval {
$ task = $ args { vmview } - > PowerOnVM_Task ( host = > $ hyphash { $ hyp } - > { hostview } ) ;
} ;
if ( $@ ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , ":" . $@ ] , $ output_handler , $ node ) ;
return ;
}
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & poweron_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ; #$hyp_conns->{$hyp};
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > $ intent . 'on' , forceon = > $ forceon } ;
} else {
xCAT::SvrUtils:: sendmsg ( $ currstat , $ output_handler , $ node ) ;
}
} elsif ( $ subcmd =~ /off/ ) {
if ( $ currstat eq 'on' ) {
$ task = $ args { vmview } - > PowerOffVM_Task ( ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > 'off' } ;
} else {
xCAT::SvrUtils:: sendmsg ( $ currstat , $ output_handler , $ node ) ;
}
} elsif ( $ subcmd =~ /suspend/ ) {
if ( $ currstat eq 'on' ) {
$ task = $ args { vmview } - > SuspendVM_Task ( ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > 'suspend' } ;
} else {
xCAT::SvrUtils:: sendmsg ( "off" , $ output_handler , $ node ) ;
}
} elsif ( $ subcmd =~ /reset/ ) {
if ( $ currstat eq 'on' ) {
$ task = $ args { vmview } - > ResetVM_Task ( ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > $ intent . 'reset' } ;
} elsif ( $ args { pretendop } ) { #It is off, but pretend it was on
eval {
$ task = $ args { vmview } - > PowerOnVM_Task ( host = > $ hyphash { $ hyp } - > { hostview } ) ;
} ;
if ( $@ ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , ":" . $@ ] , $ output_handler , $ node ) ;
return ;
}
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > $ intent . 'reset' } ;
} else {
xCAT::SvrUtils:: sendmsg ( $ currstat , $ output_handler , $ node ) ;
}
}
}
sub generic_vm_operation { #The general form of firing per-vm requests to ESX hypervisor
my $ properties = shift ; #The relevant properties to the general task, MUST INCLUDE config.name
my $ function = shift ; #The function to actually run against the right VM view
my @ exargs = @ _ ; #Store the rest to pass on
my $ hyp ;
my $ vmviews ;
my % vcviews ; #views populated once per vcenter server for improved performance
my $ node ;
foreach $ hyp ( keys % hyphash ) {
if ( $ viavcenterbyhyp - > { $ hyp } ) {
foreach $ node ( sort ( keys % { $ hyphash { $ hyp } - > { nodes } } ) ) {
$ vcenterhash { $ hyphash { $ hyp } - > { vcenter } - > { name } } - > { vms } - > { $ node } = 1 ;
}
}
}
my $ currentvcenter ;
foreach $ currentvcenter ( keys % vcenterhash ) {
#retrieve all vm views in one gulp
my $ vmsearchstring = join ( ")|(" , keys % { $ vcenterhash { $ currentvcenter } - > { vms } } ) ;
$ vmsearchstring = '^((' . $ vmsearchstring . '))(\z|\.)' ;
my $ regex = qr/$vmsearchstring/ o ;
$ vcviews { $ currentvcenter } = $ vcenterhash { $ currentvcenter } - > { conn } - > find_entity_views ( view_type = > 'VirtualMachine' , properties = > $ properties , filter = > { 'config.name' = > $ regex } ) ;
}
foreach $ hyp ( keys % hyphash ) {
if ( $ viavcenterbyhyp - > { $ hyp } ) {
if ( $ vcviews { $ hyphash { $ hyp } - > { vcenter } - > { name } } ) { next ; }
#$vcviews{$hyphash{$hyp}->{vcenter}->{name}} = $hyphash{$hyp}->{conn}->find_entity_views(view_type => 'VirtualMachine',properties=>$properties);
foreach ( @ { $ vcviews { $ hyphash { $ hyp } - > { vcenter } - > { name } } } ) {
my $ node = $ _ - > { 'config.name' } ;
unless ( defined $ tablecfg { vm } - > { $ node } ) {
$ node =~ s/\..*// ; #try the short name;
}
if ( defined $ tablecfg { vm } - > { $ node } ) { #see if the host pointer requires a refresh
my $ host = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ _ - > { 'runtime.host' } , properties = > [ 'summary.config.name' ] ) ;
$ host = $ host - > { 'summary.config.name' } ;
if ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { host } eq "$host" ) { next ; }
my $ newnhost = inet_aton ( $ host ) ;
my $ oldnhost = inet_aton ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { host } ) ;
if ( $ newnhost eq $ oldnhost ) { next ; } #it resolved fine
my $ shost = $ host ;
$ shost =~ s/\..*// ;
if ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { host } eq "$shost" ) { next ; }
#time to figure out which of these is a node
my @ nodes = noderange ( "$host,$shost" ) ;
my $ vmtab = xCAT::Table - > new ( "vm" , - create = > 1 ) ;
unless ( $ vmtab ) {
die "Error opening vm table" ;
}
if ( $ nodes [ 0 ] ) {
print $ node . " and " . $ nodes [ 0 ] ;
$ vmtab - > setNodeAttribs ( $ node , { host = > $ nodes [ 0 ] } ) ;
} #else {
# $vmtab->setNodeAttribs($node,{host=>$host});
#}
}
}
}
}
foreach $ hyp ( keys % hyphash ) {
if ( $ viavcenterbyhyp - > { $ hyp } ) {
$ vmviews = $ vcviews { $ hyphash { $ hyp } - > { vcenter } - > { name } }
} else {
$ vmviews = [] ;
my $ node ;
foreach $ node ( sort ( keys % { $ hyphash { $ hyp } - > { nodes } } ) ) {
push @ { $ vmviews } , $ hyphash { $ hyp } - > { conn } - > find_entity_view ( view_type = > 'VirtualMachine' , properties = > $ properties , filter = > { 'config.name' = > qr/^$node/ } ) ;
}
#$vmviews = $hyphash{$hyp}->{conn}->find_entity_views(view_type => 'VirtualMachine',properties=>$properties);
}
my % mgdvms ; #sort into a hash for convenience
foreach ( @$ vmviews ) {
$ mgdvms { $ _ - > { 'config.name' } } = $ _ ;
}
my $ node ;
foreach $ node ( sort ( keys % { $ hyphash { $ hyp } - > { nodes } } ) ) {
$ function - > (
node = > $ node ,
hyp = > $ hyp ,
vmview = > $ mgdvms { $ node } ,
exargs = > \ @ exargs
) ;
process_tasks; #check for tasks needing followup actions before the task is forgotten (VMware's task memory is fairly short at times)
}
}
}
sub generic_hyp_operation { #The general form of firing per-hypervisor requests to ESX hypervisor
my $ function = shift ; #The function to actually run against the right VM view
my @ exargs = @ _ ; #Store the rest to pass on
my $ hyp ;
foreach $ hyp ( keys % hyphash ) {
process_tasks; #check for tasks needing followup actions before the task is forgotten (VMware's task memory is fairly short at times)
my @ relevant_nodes = sort ( keys % { $ hyphash { $ hyp } - > { nodes } } ) ;
unless ( scalar @ relevant_nodes ) {
next ;
}
$ function - > (
nodes = > \ @ relevant_nodes ,
hyp = > $ hyp ,
exargs = > \ @ exargs
) ;
#my $vmviews = $hyp_conns->{$hyp}->find_entity_views(view_type => 'VirtualMachine',properties=>['runtime.powerState','config.name']);
#my %mgdvms; #sort into a hash for convenience
#foreach (@$vmviews) {
# $mgdvms{$_->{'config.name'}} = $_;
#}
#my $node;
#foreach $node (sort (keys %{$hyp_hash->{$hyp}->{nodes}})){
# $function->($node,$mgdvms{$node},$taskstotrack,$callback,@exargs);
#REMINDER FOR RINV TO COME
# foreach (@nothing) { #@{$mgdvms{$node}->config->hardware->device}) {
# if (defined $_->{macAddress}) {
# print "\nFound a mac: ".$_->macAddress."\n";
# }
# }
# }
}
}
2010-02-22 20:40:42 +00:00
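#rmhypervisor callback: once the host has been disconnected from vCenter, destroy its
#inventory entry.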
sub rmhypervisor_disconnected {
my $ task = shift ;
my $ parms = shift ;
my $ node = $ parms - > { node } ;
my $ hyp = $ node ;
my $ state = $ task - > info - > state - > val ;
if ( $ state eq 'success' ) {
my $ task = $ hyphash { $ hyp } - > { hostview } - > Destroy_Task ( ) ;
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > 'removed' } ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ hyp ;
} elsif ( $ state eq 'error' ) {
relay_vmware_err ( $ task , "" , $ node ) ;
}
}
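#rmhypervisor callback: once the host has entered maintenance mode, submit the task to
#disconnect it from vCenter.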
sub rmhypervisor_inmaintenance {
my $ task = shift ;
my $ parms = shift ;
my $ state = $ task - > info - > state - > val ;
my $ node = $ parms - > { node } ;
my $ intent = $ parms - > { successtext } ;
if ( $ state eq 'success' ) {
my $ hyp = $ parms - > { node } ;
my $ task = $ hyphash { $ hyp } - > { hostview } - > DisconnectHost_Task ( ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & rmhypervisor_disconnected ;
$ running_tasks { $ task } - > { hyp } = $ hyp ;
$ running_tasks { $ task } - > { data } = { node = > $ hyp } ;
} elsif ( $ state eq 'error' ) {
relay_vmware_err ( $ task , "" , $ node ) ;
}
}
2010-06-08 16:54:13 +00:00
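#lsvm: list the names of all virtual machines currently registered on the hypervisor.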
sub lsvm {
my % args = @ _ ;
my $ hyp = $ args { hyp } ;
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hyphash { $ hyp } - > { conn } ) ; #,properties=>['config','configManager']);
use Data::Dumper ;
2010-06-08 18:19:48 +00:00
my $ vms = $ hyphash { $ hyp } - > { hostview } - > vm ;
unless ( $ vms ) {
return ;
}
foreach ( @$ vms ) {
2010-06-08 16:54:13 +00:00
my $ vmv = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ _ ) ;
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( $ vmv - > name , $ output_handler , $ hyp ) ;
2010-06-08 16:54:13 +00:00
}
return ;
}
2010-02-22 20:40:42 +00:00
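#rmhypervisor: place the hypervisor into maintenance mode; the follow-up callbacks then
#disconnect it from vCenter and remove it from the inventory.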
sub rmhypervisor {
my % args = @ _ ;
my $ hyp = $ args { hyp } ;
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hyphash { $ hyp } - > { conn } ) ; #,properties=>['config','configManager']);
if ( defined $ hyphash { $ hyp } - > { hostview } ) {
my $ task = $ hyphash { $ hyp } - > { hostview } - > EnterMaintenanceMode_Task ( timeout = > 0 ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & rmhypervisor_inmaintenance ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ hyp } ;
}
return ;
}
2010-08-31 20:53:55 +00:00
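#clonevms: entry point for the clonevm command. -t <url/mastername> copies a single VM to a
#new master (template) at that storage location, -b <master> deploys the listed nodes as
#clones of an existing master; -f and -d are passed through to the promote/clone helpers.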
sub clonevms {
my % args = @ _ ;
my $ nodes = $ args { nodes } ;
my $ hyp = $ args { hyp } ;
@ ARGV = @ { $ args { exargs } } ; #for getoptions;
my $ base ;
my $ force ;
my $ detach ;
my $ target ;
require Getopt::Long ;
GetOptions (
'b=s' = > \ $ base ,
'f' = > \ $ force ,
'd' = > \ $ detach ,
't=s' = > \ $ target ,
) ;
if ( $ base and $ target ) {
foreach my $ node ( @$ nodes ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Cannot specify both base (-b) and target (-t)" ] , $ output_handler , $ node ) ;
}
return ;
}
unless ( $ base or $ target ) {
foreach my $ node ( @$ nodes ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Must specify one of base (-b) or target (-t)" ] , $ output_handler , $ node ) ;
}
return ;
}
if ( $ target and ( scalar @ { $ nodes } != 1 ) ) {
foreach my $ node ( @$ nodes ) {
xCAT::SvrUtils::sendmsg([ 1, "Cannot specify multiple nodes to create a master from" ], $output_handler, $node);
}
return ;
}
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hyphash { $ hyp } - > { conn } ) ;
my $ newdatastores ;
my $ mastername ;
my $ url ;
2010-09-01 20:43:34 +00:00
my $ masterref ;
2010-08-31 20:53:55 +00:00
if ( $ base ) { #if base, we need to pull in the target datastores
my $ mastertab = xCAT::Table - > new ( 'vmmaster' ) ;
2010-09-03 19:11:14 +00:00
$ masterref = $ mastertab - > getAttribs ( { name = > $ base } , [ qw/storage os arch profile storagemodel nics/ ] ) ;
2010-08-31 20:53:55 +00:00
unless ( $ masterref ) {
foreach my $ node ( @$ nodes ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Cannot find master $base in vmmaster table" ] , $ output_handler , $ node ) ;
}
return ;
}
$newdatastores->{ $masterref->{storage} } = []; #make sure that the master datastore is mounted...
foreach (@$nodes) {
    my $url;
    if ($tablecfg{vm}->{$_}->[0]->{storage}) {
        $url = $tablecfg{vm}->{$_}->[0]->{storage};
    } else {
        $url = $masterref->{storage};
    }
    unless ($url) { die "Shouldn't be possible"; }
    if (ref $newdatastores->{$url}) { #this datastore already has nodes targeting it
        push @{ $newdatastores->{$url} }, $_;
    } else {
        $newdatastores->{$url} = [$_];
    }
}
} elsif ( $ target ) {
$ url = $ target ;
$ url =~ s!/([^/]*)\z!! ;
$ mastername = $ 1 ;
$ newdatastores - > { $ url } = [ $ nodes - > [ 0 ] ] ;
}
unless ( validate_datastore_prereqs ( $ nodes , $ hyp , $ newdatastores ) ) {
return ;
}
$ hyphash { $ hyp } - > { vmfolder } = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ hyphash { $ hyp } - > { conn } - > find_entity_view ( view_type = > 'Datacenter' , properties = > [ 'vmFolder' ] ) - > vmFolder ) ;
if ( $ target ) {
return promote_vm_to_master ( node = > $ nodes - > [ 0 ] , target = > $ target , force = > $ force , detach = > $ detach , hyp = > $ hyp , url = > $ url , mastername = > $ mastername ) ;
} elsif ( $ base ) {
2010-09-03 20:08:41 +00:00
return clone_vms_from_master ( nodes = > $ nodes , base = > $ base , detach = > $ detach , hyp = > $ hyp , mastername = > $ base , masterent = > $ masterref ) ;
2010-08-31 20:53:55 +00:00
}
}
2010-09-01 20:43:34 +00:00
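#clone_vms_from_master: find the master template in the VMware inventory and submit a
#CloneVM_Task for each node; on success the callback seeds the node's nodetype and vm table
#entries from the vmmaster definition.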
sub clone_vms_from_master {
my % args = @ _ ;
my $ mastername = $ args { mastername } ;
2010-09-01 20:50:34 +00:00
my $ hyp = $ args { hyp } ;
2010-09-03 20:08:41 +00:00
my $ regex = qr/^$mastername\z/ ;
2010-09-01 20:43:34 +00:00
my @ nodes = @ { $ args { nodes } } ;
my $ node ;
my $ masterviews = $ hyphash { $ hyp } - > { conn } - > find_entity_views ( view_type = > 'VirtualMachine' , filter = > { 'config.name' = > $ regex } ) ;
if ( scalar ( @$ masterviews ) != 1 ) {
foreach $ node ( @ nodes ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to find master $mastername in VMWare infrastructure" ] , $ output_handler , $ node ) ;
}
return ;
}
my $ masterview = $ masterviews - > [ 0 ] ;
my $ masterent = $ args { masterent } ;
foreach $ node ( @ nodes ) {
2010-09-01 20:50:34 +00:00
my $ destination = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { storage } ;
2010-09-01 20:43:34 +00:00
my $ nodetypeent ;
my $ vment ;
foreach ( qw/os arch profile/ ) {
$ nodetypeent - > { $ _ } = $ masterent - > { $ _ } ;
}
foreach ( qw/storagemodel nics/ ) {
$ vment - > { $ _ } = $ masterent - > { $ _ } ;
}
$ vment - > { master } = $ args { mastername } ;
unless ( $ destination ) {
$ destination = $ masterent - > { storage } ;
$ vment - > { storage } = $ destination ;
}
2010-09-16 19:28:25 +00:00
unless ( defined $ hyphash { $ hyp } - > { pool } ) {
$ hyphash { $ hyp } - > { pool } = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ hyphash { $ hyp } - > { hostview } - > parent , properties = > [ 'resourcePool' ] ) - > resourcePool ;
}
2010-09-01 20:43:34 +00:00
my $ relocatespec = VirtualMachineRelocateSpec - > new (
datastore = > $ hyphash { $ hyp } - > { datastorerefmap } - > { $ destination } ,
#diskMoveType=>"createNewChildDiskBacking", #FYI: requires a snapshot, which isn't compatible with templates; moveChildMostDiskBacking would potentially be fine, but either way is HA-incompatible and limited to 8, arbitrary limitations that are hard to work around...
pool => $hyphash{$hyp}->{pool},
) ;
my $ clonespec = VirtualMachineCloneSpec - > new (
location = > $ relocatespec ,
template = > 0 ,
powerOn = > 0
) ;
2010-09-01 20:50:34 +00:00
my $ task = $ masterview - > CloneVM_Task ( folder = > $ hyphash { $ hyp } - > { vmfolder } , name = > $ node , spec = > $ clonespec ) ;
2010-09-01 20:43:34 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > 'Successfully cloned from ' . $ args { mastername } , mastername = > $ args { mastername } , nodetypeent = > $ nodetypeent , vment = > $ vment } ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & clone_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ; #$hyp_conns->{$hyp};
}
}
sub clone_task_callback {
my $ task = shift ;
my $ parms = shift ;
my $ state = $ task - > info - > state - > val ;
my $ node = $ parms - > { node } ;
my $ intent = $ parms - > { successtext } ;
if ( $ state eq 'success' ) {
xCAT::SvrUtils:: sendmsg ( $ intent , $ output_handler , $ node ) ;
my $ nodetype = xCAT::Table - > new ( 'nodetype' , - create = > 1 ) ;
my $ vm = xCAT::Table - > new ( 'vm' , - create = > 1 ) ;
2010-09-01 20:50:34 +00:00
$ vm - > setAttribs ( { node = > $ node } , $ parms - > { vment } ) ;
$ nodetype - > setAttribs ( { node = > $ node } , $ parms - > { nodetypeent } ) ;
2010-09-01 20:43:34 +00:00
} elsif ( $ state eq 'error' ) {
relay_vmware_err ( $ task , "" , $ node ) ;
}
}
2010-08-31 20:53:55 +00:00
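#promote_vm_to_master: clone an existing node's VM to the requested datastore as a template;
#promote_task_callback then records the new master in the vmmaster table.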
sub promote_vm_to_master {
my % args = @ _ ;
my $ node = $ args { node } ;
my $ hyp = $ args { hyp } ;
my $ regex = qr/^$node(\z|\.)/ ;
my $ nodeviews = $ hyphash { $ hyp } - > { conn } - > find_entity_views ( view_type = > 'VirtualMachine' , filter = > { 'config.name' = > $ regex } ) ;
if ( scalar ( @$ nodeviews ) != 1 ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Cannot find $node in VMWare infrastructure" ] , $ output_handler , $ node ) ;
return ;
}
my $ nodeview = shift @$ nodeviews ;
print Dumper ( $ hyphash { $ hyp } - > { datastorerefmap } ) ;
my $ relocatespec = VirtualMachineRelocateSpec - > new (
datastore = > $ hyphash { $ hyp } - > { datastorerefmap } - > { $ args { url } } ,
) ;
my $ clonespec = VirtualMachineCloneSpec - > new (
location = > $ relocatespec ,
template = > 1 ,
powerOn = > 0
) ;
my $ task = $ nodeview - > CloneVM_Task ( folder = > $ hyphash { $ hyp } - > { vmfolder } , name = > $ args { mastername } , spec = > $ clonespec ) ;
2010-09-01 19:43:09 +00:00
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > 'Successfully copied to ' . $ args { mastername } , mastername = > $ args { mastername } , url = > $ args { url } } ;
2010-08-31 20:53:55 +00:00
$ running_tasks { $ task } - > { task } = $ task ;
2010-09-01 19:43:09 +00:00
$ running_tasks { $ task } - > { callback } = \ & promote_task_callback ;
2010-08-31 20:53:55 +00:00
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ; #$hyp_conns->{$hyp};
}
2010-09-01 19:43:09 +00:00
sub promote_task_callback {
my $ task = shift ;
my $ parms = shift ;
my $ state = $ task - > info - > state - > val ;
my $ node = $ parms - > { node } ;
my $ intent = $ parms - > { successtext } ;
if ( $ state eq 'success' ) {
xCAT::SvrUtils:: sendmsg ( $ intent , $ output_handler , $ node ) ;
my $ mastertabentry = {
originator = > $ requester ,
2010-09-03 18:51:19 +00:00
vintage = > scalar ( localtime ) ,
2010-09-01 19:43:09 +00:00
storage = > $ parms - > { url } ,
} ;
foreach ( qw/os arch profile/ ) {
if ( defined ( $ tablecfg { nodetype } - > { $ node } - > [ 0 ] - > { $ _ } ) ) {
$ mastertabentry - > { $ _ } = $ tablecfg { nodetype } - > { $ node } - > [ 0 ] - > { $ _ } ;
}
}
foreach ( qw/storagemodel nics/ ) {
if ( defined ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { $ _ } ) ) {
$ mastertabentry - > { $ _ } = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { $ _ } ;
}
}
my $ vmmastertab = xCAT::Table - > new ( 'vmmaster' , - create = > 1 ) ;
my $ date = scalar ( localtime ) ;
2010-09-01 20:50:34 +00:00
$ vmmastertab - > setAttribs ( { name = > $ parms - > { mastername } } , $ mastertabentry ) ;
2010-09-01 19:43:09 +00:00
} elsif ( $ state eq 'error' ) {
relay_vmware_err ( $ task , "" , $ node ) ;
}
}
2009-06-22 16:00:28 +00:00
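#mkvms: create (or register) the virtual machines for a set of nodes on one hypervisor, then
#run makedhcp for any nodes flagged as needing DHCP updates.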
sub mkvms {
my % args = @ _ ;
my $ nodes = $ args { nodes } ;
my $ hyp = $ args { hyp } ;
@ ARGV = @ { $ args { exargs } } ; #for getoptions;
my $ disksize ;
require Getopt::Long ;
GetOptions (
'size|s=s' = > \ $ disksize
) ;
my $ node ;
2009-07-14 20:50:13 +00:00
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hyphash { $ hyp } - > { conn } ) ; #,properties=>['config','configManager']);
2009-06-22 16:00:28 +00:00
unless ( validate_datastore_prereqs ( $ nodes , $ hyp ) ) {
return ;
}
$ hyphash { $ hyp } - > { vmfolder } = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ hyphash { $ hyp } - > { conn } - > find_entity_view ( view_type = > 'Datacenter' , properties = > [ 'vmFolder' ] ) - > vmFolder ) ;
$ hyphash { $ hyp } - > { pool } = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ hyphash { $ hyp } - > { hostview } - > parent , properties = > [ 'resourcePool' ] ) - > resourcePool ;
my $ cfg ;
foreach $ node ( @$ nodes ) {
process_tasks; #check for tasks needing followup actions before the task is forgotten (VMware's task memory is fairly short at times)
if ( $ hyphash { $ hyp } - > { conn } - > find_entity_view ( view_type = > "VirtualMachine" , filter = > { name = > $ node } ) ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Virtual Machine already exists" ] , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
next ;
} else {
register_vm ( $ hyp , $ node , $ disksize ) ;
}
}
my @ dhcpnodes ;
foreach ( keys % { $ tablecfg { dhcpneeded } } ) {
push @ dhcpnodes , $ _ ;
delete $ tablecfg { dhcpneeded } - > { $ _ } ;
}
$ executerequest - > ( { command = > [ 'makedhcp' ] , node = > \ @ dhcpnodes } ) ;
}
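#setboot: implement rsetboot by reconfiguring the VM; 'setup' forces entry into BIOS setup on
#the next boot, any other value is written to the bios.bootDeviceClasses extra config option
#as an allow list.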
sub setboot {
my % args = @ _ ;
my $ node = $ args { node } ;
my $ hyp = $ args { hyp } ;
if ( not defined $ args { vmview } ) { #attempt one refresh
$ args { vmview } = $ hyphash { $ hyp } - > { conn } - > find_entity_view ( view_type = > 'VirtualMachine' , properties = > [ 'config.name' ] , filter = > { name = > $ node } ) ;
2010-07-29 20:03:05 +00:00
if ( not defined $ args { vmview } ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "VM does not appear to exist" ] , $ output_handler , $ node ) ;
2010-07-29 20:03:05 +00:00
return ;
}
2009-06-22 16:00:28 +00:00
}
my $bootorder = ${ $args{exargs} }[0];
#NOTE: VMware simply does not currently seem to allow programmatically changing the boot
#order like other virtualization solutions supported by xCAT.
#This doesn't behave quite like any existing mechanism:
#vm.bootorder was meant to take the place of system nvram; vmware imitates that unfortunate aspect of bare metal too well..
#rsetboot was created to describe the ipmi scenario of a transient boot device; this is persistent *except* for setup, which is not
#rbootseq was meant to be entirely persistent and ordered.
#rsetboot is picked; its usage scenario matches about as well as anything I could think of
my $ reconfigspec ;
if ( $ bootorder =~ /setup/ ) {
unless ( $ bootorder eq 'setup' ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "rsetboot parameter may not contain 'setup' with other items, assuming vm.bootorder is just 'setup'" ] , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
}
$ reconfigspec = VirtualMachineConfigSpec - > new (
bootOptions = > VirtualMachineBootOptions - > new ( enterBIOSSetup = > 1 ) ,
) ;
} else {
$ bootorder = "allow:" . $ bootorder ;
$ reconfigspec = VirtualMachineConfigSpec - > new (
bootOptions = > VirtualMachineBootOptions - > new ( enterBIOSSetup = > 0 ) ,
extraConfig = > [ OptionValue - > new ( key = > 'bios.bootDeviceClasses' , value = > $ bootorder ) ]
) ;
}
my $ task = $ args { vmview } - > ReconfigVM_Task ( spec = > $ reconfigspec ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & generic_task_callback ;
$ running_tasks { $ task } - > { hyp } = $ args { hyp } ;
$ running_tasks { $ task } - > { data } = { node = > $ node , successtext = > $ { $ args { exargs } } [ 0 ] } ;
}
sub register_vm { #Attempt to register existing instance of a VM
my $ hyp = shift ;
my $ node = shift ;
my $ disksize = shift ;
my $ blockedfun = shift ; #a pointer to a blocked function to call on success
my $ blockedargs = shift ; #hash reference to call blocked function with
2010-06-12 01:57:12 +00:00
my $ failonerr = shift ;
2009-06-22 16:00:28 +00:00
my $ task ;
validate_network_prereqs ( [ keys % { $ hyphash { $ hyp } - > { nodes } } ] , $ hyp ) ;
unless ( defined $ hyphash { $ hyp } - > { datastoremap } or validate_datastore_prereqs ( [ keys % { $ hyphash { $ hyp } - > { nodes } } ] , $ hyp ) ) {
die "unexpected condition" ;
}
unless ( defined $ hyphash { $ hyp } - > { vmfolder } ) {
$ hyphash { $ hyp } - > { vmfolder } = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ hyphash { $ hyp } - > { conn } - > find_entity_view ( view_type = > 'Datacenter' , properties = > [ 'vmFolder' ] ) - > vmFolder ) ;
}
unless ( defined $ hyphash { $ hyp } - > { pool } ) {
$ hyphash { $ hyp } - > { pool } = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ hyphash { $ hyp } - > { hostview } - > parent , properties = > [ 'resourcePool' ] ) - > resourcePool ;
}
2010-07-09 06:24:18 +00:00
# Try to add an existing VM to the machine folder
2009-06-22 16:00:28 +00:00
my $ success = eval {
$ task = $ hyphash { $ hyp } - > { vmfolder } - > RegisterVM_Task ( path = > getcfgdatastore ( $ node , $ hyphash { $ hyp } - > { datastoremap } ) . " /$node/$node.vmx" , name = > $ node , pool = > $ hyphash { $ hyp } - > { pool } , asTemplate = > 0 ) ;
} ;
2010-07-09 06:24:18 +00:00
# if we couldn't add it then it means it wasn't created yet. So we create it.
2009-06-22 16:00:28 +00:00
if ( $@ or not $ success ) {
2010-07-09 06:24:18 +00:00
#if (ref($@) eq 'SoapFault') {
# if (ref($@->detail) eq 'NotFound') {
2009-06-22 16:00:28 +00:00
register_vm_callback ( undef , {
node = > $ node ,
disksize = > $ disksize ,
blockedfun = > $ blockedfun ,
blockedargs = > $ blockedargs ,
2010-06-12 01:57:12 +00:00
errregister = > $ failonerr ,
2009-06-22 16:00:28 +00:00
hyp = > $ hyp
} ) ;
}
if ( $ task ) {
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & register_vm_callback ;
$ running_tasks { $ task } - > { hyp } = $ hyp ;
$ running_tasks { $ task } - > { data } = {
node = > $ node ,
disksize = > $ disksize ,
blockedfun = > $ blockedfun ,
blockedargs = > $ blockedargs ,
2010-06-12 01:57:12 +00:00
errregister = > $ failonerr ,
2009-06-22 16:00:28 +00:00
hyp = > $ hyp
} ;
}
}
sub register_vm_callback {
my $ task = shift ;
my $ args = shift ;
if ( not $ task or $ task - > info - > state - > val eq 'error' ) { #TODO: fail for 'rpower' flow, mkvm is too invasive in VMWare to be induced by 'rpower on'
if ( not defined $ args - > { blockedfun } ) {
mknewvm ( $ args - > { node } , $ args - > { disksize } , $ args - > { hyp } ) ;
2010-06-12 01:57:12 +00:00
} elsif ( $ args - > { errregister } ) {
relay_vmware_err ( $ task , "" , $ args - > { node } ) ;
2009-06-22 16:00:28 +00:00
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "mkvm must be called before use of this function" ] , $ output_handler , $ args - > { node } ) ;
2009-06-22 16:00:28 +00:00
}
} elsif (defined $args->{blockedfun}) { #If there is a blocked function, call it here
$ args - > { blockedfun } - > ( % { $ args - > { blockedargs } } ) ;
}
}
2010-07-09 06:24:18 +00:00
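#getURI: normalize a vm.storage/vm.cfgstore method+location pair into a canonical URI.
#For nfs the server is resolved to its IP address (e.g. nfs://server/vms becomes
#nfs://<ip>/vms); for vmfs only the datastore name is kept (vmfs://datastore1).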
sub getURI {
my $ method = shift ;
my $ location = shift ;
my $ uri = '' ;
if ( $ method =~ /nfs/ ) {
( my $ server , my $ path ) = split /\// , $ location , 2 ;
$ server =~ s/:$// ; #tolerate habitual colons
my $ servern = inet_aton ( $ server ) ;
unless ( $ servern ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "could not resolve '$server' to an address from vm.storage/vm.cfgstore" ] , $ output_handler ) ;
2010-07-09 06:24:18 +00:00
}
$ server = inet_ntoa ( $ servern ) ;
$ uri = "nfs://$server/$path" ;
} elsif ( $ method =~ /vmfs/ ) {
( my $ name , undef ) = split /\// , $ location , 2 ;
$ name =~ s/:$// ; #remove a : if someone put it in for some reason.
$ uri = "vmfs://$name" ;
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils::sendmsg([ 1, "Unsupported VMware storage method: $method. Please use 'vmfs' or 'nfs'" ], $output_handler);
2010-07-09 06:24:18 +00:00
}
return $ uri ;
}
2009-06-22 16:00:28 +00:00
sub getcfgdatastore {
my $ node = shift ;
my $ dses = shift ;
2009-07-10 19:08:06 +00:00
my $ cfgdatastore = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { cfgstore } ;
unless ( $ cfgdatastore ) {
$ cfgdatastore = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { storage } ;
#TODO: if multiple drives are specified, make sure to split this out
2009-09-28 18:25:12 +00:00
#DONE: I believe the regex after this conditional takes care of that case already..
2009-07-10 19:08:06 +00:00
}
2010-03-24 12:08:21 +00:00
$ cfgdatastore =~ s/=.*// ;
2010-01-26 15:44:00 +00:00
( my $ method , my $ location ) = split /:\/\// , $ cfgdatastore , 2 ;
2010-07-09 06:24:18 +00:00
my $ uri = getURI ( $ method , $ location ) ;
2010-01-26 15:44:00 +00:00
$ cfgdatastore = "[" . $ dses - > { $ uri } . "]" ;
#$cfgdatastore =~ s/,.*$//; #these two lines of code were kinda pointless
#$cfgdatastore =~ s/\/$//;
2009-06-22 16:00:28 +00:00
return $ cfgdatastore ;
}
sub mknewvm {
my $ node = shift ;
my $ disksize = shift ;
my $ hyp = shift ;
#TODO: above
2009-07-17 14:18:25 +00:00
my $ cfg = build_cfgspec ( $ node , $ hyphash { $ hyp } - > { datastoremap } , $ hyphash { $ hyp } - > { nets } , $ disksize , $ hyp ) ;
2010-01-08 18:20:38 +00:00
my $ task = $ hyphash { $ hyp } - > { vmfolder } - > CreateVM_Task ( config = > $ cfg , pool = > $ hyphash { $ hyp } - > { pool } , host = > $ hyphash { $ hyp } - > { hostview } ) ;
2009-06-22 16:00:28 +00:00
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & mkvm_callback ;
$ running_tasks { $ task } - > { hyp } = $ hyp ;
2010-06-23 14:50:46 +00:00
$ running_tasks { $ task } - > { data } = { hyp = > $ hyp , node = > $ node } ;
2009-06-22 16:00:28 +00:00
}
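#getUnits: convert a size value with an optional K/M/G suffix into multiples of $divisor
#bytes, using $defunit when no suffix is present; e.g. getUnits("4G","G",1024) returns
#4194304 (KB).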
sub getUnits {
my $ amount = shift ;
my $ defunit = shift ;
my $ divisor = shift ;
unless ( $ amount ) { return ; }
unless ( $ divisor ) {
$ divisor = 1 ;
}
if ($amount =~ /(\D)$/) { #a unit suffix is present; use it in place of the default unit
$ defunit = $ 1 ;
chop $ amount ;
}
if ( $ defunit =~ /k/i ) {
return $ amount * 1024 / $ divisor ;
} elsif ( $ defunit =~ /m/i ) {
return $ amount * 1048576 / $ divisor ;
} elsif ( $ defunit =~ /g/i ) {
return $ amount * 1073741824 / $ divisor ;
}
}
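#getguestid: map a node's nodetype.os/arch onto the closest VMware guestId via %guestidmap,
#falling back to otherGuest/otherGuest64 when the os is not recognized.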
sub getguestid {
my $ osfound = 0 ;
my $ node = shift ;
my $ nodeos = $ tablecfg { nodetype } - > { $ node } - > [ 0 ] - > { os } ;
my $ nodearch = $ tablecfg { nodetype } - > { $ node } - > [ 0 ] - > { arch } ;
foreach ( keys % guestidmap ) {
2010-07-09 06:24:18 +00:00
if ( defined ( $ nodeos ) and $ nodeos =~ /$_/ ) {
2009-06-22 16:00:28 +00:00
if ( $ nodearch eq 'x86_64' ) {
$ nodeos = $ guestidmap { $ _ } . "64Guest" ;
} else {
$ nodeos = $ guestidmap { $ _ } ;
$ nodeos =~ s/_$// ;
$ nodeos . = "Guest" ;
}
$ osfound = 1 ;
last ;
}
}
unless ( $ osfound ) {
2010-07-09 06:24:18 +00:00
if ( defined ( $ nodearch ) and $ nodearch eq 'x86_64' ) {
2009-06-22 16:00:28 +00:00
$ nodeos = "otherGuest64" ;
} else {
$ nodeos = "otherGuest" ;
}
}
return $ nodeos ;
}
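#build_cfgspec: assemble the VirtualMachineConfigSpec for a new VM: memory and cpus from the
#vm table (defaulting to 512MB and 1 cpu), disk and nic devices from create_storage_devs and
#create_nic_devs, and a uuid taken from the vpd table or generated and stored there.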
sub build_cfgspec {
my $ node = shift ;
my $ dses = shift ; #map to match vm table to datastore names
my $ netmap = shift ;
my $ disksize = shift ;
2009-07-17 14:18:25 +00:00
my $ hyp = shift ;
2009-06-22 16:00:28 +00:00
my $ memory ;
my $ ncpus ;
unless ( $ memory = getUnits ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { memory } , "M" , 1048576 ) ) {
$ memory = 512 ;
}
unless ( $ ncpus = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { cpus } ) {
$ ncpus = 1 ;
}
my @ devices ;
$ currkey = 0 ;
push @ devices , create_storage_devs ( $ node , $ dses , $ disksize ) ;
2009-07-17 14:18:25 +00:00
push @ devices , create_nic_devs ( $ node , $ netmap , $ hyp ) ;
2009-07-13 20:10:08 +00:00
#my $cfgdatastore = $tablecfg{vm}->{$node}->[0]->{storage}; #TODO: need a new cfglocation field in case of stateless guest?
#$cfgdatastore =~ s/,.*$//;
#$cfgdatastore =~ s/\/$//;
#$cfgdatastore = "[".$dses->{$cfgdatastore}."]";
my $ cfgdatastore = getcfgdatastore ( $ node , $ dses ) ;
2009-06-22 16:00:28 +00:00
my $ vfiles = VirtualMachineFileInfo - > new ( vmPathName = > $ cfgdatastore ) ;
#my $nodeos = $tablecfg{nodetype}->{$node}->[0]->{os};
#my $nodearch = $tablecfg{nodetype}->{$node}->[0]->{arch};
my $ nodeos = getguestid ( $ node ) ; #nodeos=>$nodeos,nodearch=>$nodearch);
2010-06-23 14:50:46 +00:00
my $ uuid ;
if ( $ tablecfg { vpd } - > { $ node } - > [ 0 ] - > { uuid } ) {
$ uuid = $ tablecfg { vpd } - > { $ node } - > [ 0 ] - > { uuid } ;
} else {
if ( $ tablecfg { mac } - > { $ node } - > [ 0 ] - > { mac } ) { #a uuidv1 is possible, generate that for absolute uniqueness guarantee
my $ mac = $ tablecfg { mac } - > { $ node } - > [ 0 ] - > { mac } ;
$ mac =~ s/\|.*// ;
$ mac =~ s/!.*// ;
$ uuid = xCAT::Utils:: genUUID ( mac = > $ mac ) ;
} else {
$ uuid = xCAT::Utils:: genUUID ( ) ;
}
2010-07-09 06:24:18 +00:00
2010-06-23 14:50:46 +00:00
my $ vpdtab = xCAT::Table - > new ( 'vpd' ) ;
2010-07-09 06:24:18 +00:00
$ vpdtab - > setNodeAttribs ( $ node , { uuid = > $ uuid } ) ;
2010-06-23 14:50:46 +00:00
}
2009-06-22 16:00:28 +00:00
return VirtualMachineConfigSpec - > new (
name = > $ node ,
files = > $ vfiles ,
guestId = > $ nodeos ,
memoryMB = > $ memory ,
numCPUs = > $ ncpus ,
deviceChange = > \ @ devices ,
2010-06-23 14:50:46 +00:00
uuid = > $ uuid ,
2009-06-22 16:00:28 +00:00
) ;
}
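#create_nic_devs: build one VirtualDeviceConfigSpec per network listed in vm.nics, using the
#port groups resolved for this hypervisor, MAC addresses from the mac table, and the adapter
#model from vm.nicmodel (a per-nic '=model' suffix overrides it; the default is e1000).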
sub create_nic_devs {
my $ node = shift ;
my $ netmap = shift ;
2009-07-17 14:18:25 +00:00
my $ hyp = shift ;
2009-06-22 16:00:28 +00:00
my @ networks = split /,/ , $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { nics } ;
my @ devs ;
my $ idx = 0 ;
2009-08-19 15:14:38 +00:00
my @ macs = xCAT::VMCommon:: getMacAddresses ( \ % tablecfg , $ node , scalar @ networks ) ;
2009-06-22 16:00:28 +00:00
my $ connprefs = VirtualDeviceConnectInfo - > new (
allowGuestControl = > 1 ,
connected = > 0 ,
startConnected = > 1
) ;
2010-09-03 18:54:30 +00:00
my $ model = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { nicmodel } ;
2010-09-01 18:12:41 +00:00
unless ( $ model ) {
$ model = 'e1000' ;
}
2009-06-22 16:00:28 +00:00
foreach ( @ networks ) {
2009-07-17 14:18:25 +00:00
my $ pgname = $ hyphash { $ hyp } - > { pgnames } - > { $ _ } ;
2009-06-22 16:00:28 +00:00
s/.*:// ;
2010-09-01 18:12:41 +00:00
s/=(.*)$// ;
my $ tmpmodel = $ model ;
if ( $ 1 ) { $ tmpmodel = $ 1 ; }
2009-06-22 16:00:28 +00:00
my $ netname = $ _ ;
2009-09-18 19:53:48 +00:00
#print Dumper($netmap);
2009-06-22 16:00:28 +00:00
my $ backing = VirtualEthernetCardNetworkBackingInfo - > new (
2009-07-17 14:18:25 +00:00
network = > $ netmap - > { $ pgname } ,
deviceName = > $ pgname ,
2009-06-22 16:00:28 +00:00
) ;
2010-09-01 18:12:41 +00:00
my % newcardargs = (
2009-06-22 16:00:28 +00:00
key = > 0 , #3, #$currkey++,
backing = > $ backing ,
addressType = > "manual" ,
macAddress = > shift @ macs ,
connectable = > $ connprefs ,
wakeOnLanEnabled = > 1 , #TODO: configurable in tables?
) ;
2010-09-01 18:12:41 +00:00
my $ newcard ;
if ( $ tmpmodel eq 'e1000' ) {
$ newcard = VirtualE1000 - > new ( % newcardargs ) ;
} elsif ( $ tmpmodel eq 'vmxnet3' ) {
$ newcard = VirtualVmxnet3 - > new ( % newcardargs ) ;
} elsif ( $ tmpmodel eq 'pcnet32' ) {
$ newcard = VirtualPCNet32 - > new ( % newcardargs ) ;
} elsif ( $ tmpmodel eq 'vmxnet2' ) {
$ newcard = VirtualVmxnet2 - > new ( % newcardargs ) ;
} elsif ( $ tmpmodel eq 'vmxnet' ) {
$ newcard = VirtualVmxnet - > new ( % newcardargs ) ;
} else {
xCAT::SvrUtils::sendmsg([ 1, "$tmpmodel is not a recognized nic type, falling back to e1000 (vmxnet3, e1000, pcnet32, vmxnet2, and vmxnet are recognized)" ], $output_handler, $node);
$ newcard = VirtualE1000 - > new ( % newcardargs ) ;
}
2009-06-22 16:00:28 +00:00
push @ devs , VirtualDeviceConfigSpec - > new ( device = > $ newcard ,
operation = > VirtualDeviceConfigSpecOperation - > new ( 'add' ) ) ;
$ idx + + ;
}
return @devs;
}
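#create_storage_devs: build VirtualDisk specs for each disk implied by vm.storage and the
#requested sizes, choosing IDE or SCSI per vm.storagemodel (or a per-entry '=type' suffix),
#reusing free unit numbers on existing controllers, and adding LSI Logic SCSI controllers
#when newly created SCSI disks need them.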
sub create_storage_devs {
my $ node = shift ;
my $ sdmap = shift ;
2009-09-28 17:59:43 +00:00
my $ sizes = shift ;
my @ sizes = split /[,:]/ , $ sizes ;
2010-06-07 21:24:22 +00:00
my $ existingScsiCont = shift ;
my $ scsiUnit = shift ;
my $ existingIdeCont = shift ;
my $ ideUnit = shift ;
2010-06-17 21:10:39 +00:00
my $ devices = shift ;
2010-08-26 19:23:25 +00:00
my % args = @ _ ;
2009-06-22 16:00:28 +00:00
my $ scsicontrollerkey = 0 ;
my $ idecontrollerkey = 200 ; #IDE 'controllers' exist at 200 and 201 invariably, with no flexibility?
#Cannot find documentation that declares this absolute, but attempts to do otherwise
#lead in failure, also of note, these are single-channel controllers, so two devs per controller
my $ backingif ;
my @ devs ;
my $ havescsidevs = 0 ;
my $ disktype = 'ide' ;
2010-06-07 21:24:22 +00:00
my $ ideunitnum = 0 ;
my $ scsiunitnum = 0 ;
2010-06-08 13:22:33 +00:00
my $ havescsicontroller = 0 ;
2010-06-17 21:10:39 +00:00
my % usedideunits ;
my % usedscsiunits = ( 7 = > 1 , '7' = > 1 ) ;
2010-06-07 21:24:22 +00:00
if ( defined $ existingScsiCont ) {
2010-06-08 13:22:33 +00:00
$ havescsicontroller = 1 ;
2010-06-07 21:24:22 +00:00
$ scsicontrollerkey = $ existingScsiCont - > { key } ;
$ scsiunitnum = $ scsiUnit ;
2010-06-17 21:10:39 +00:00
% usedscsiunits = % { getUsedUnits ( $ scsicontrollerkey , $ devices ) } ;
2010-06-07 21:24:22 +00:00
}
if ( defined $ existingIdeCont ) {
$ idecontrollerkey = $ existingIdeCont - > { key } ;
$ ideunitnum = $ ideUnit ;
2010-06-17 21:10:39 +00:00
% usedideunits = % { getUsedUnits ( $ idecontrollerkey , $ devices ) } ;
2010-06-07 21:24:22 +00:00
}
my $ unitnum ;
2009-06-22 16:00:28 +00:00
my % disktocont ;
my $ dev ;
2009-09-28 18:25:12 +00:00
my @ storelocs = split /,/ , $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { storage } ;
2010-08-25 15:42:36 +00:00
my $ globaldisktype = $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { storagemodel } ;
unless ( $ globaldisktype ) { $ globaldisktype = 'ide' ; }
2009-09-28 18:25:12 +00:00
#number of devices is the larger of the specified sizes (TODO: masters) or storage pools to span
my $ numdevs = ( scalar @ storelocs > scalar @ sizes ? scalar @ storelocs : scalar @ sizes ) ;
while ( $ numdevs - - > 0 ) {
my $ storeloc = shift @ storelocs ;
unless ( scalar @ storelocs ) { @ storelocs = ( $ storeloc ) ; } #allow reuse of one cfg specified pool for multiple devs
2009-09-28 17:59:43 +00:00
my $ disksize = shift @ sizes ;
unless ( scalar @ sizes ) { @ sizes = ( $ disksize ) ; } #if we emptied the array, stick the last entry back on to allow it to specify all remaining disks
$ disksize = getUnits ( $ disksize , 'G' , 1024 ) ;
2010-08-25 15:42:36 +00:00
$ disktype = $ globaldisktype ;
2010-03-19 17:19:43 +00:00
if ( $ storeloc =~ /=/ ) {
( $ storeloc , $ disktype ) = split /=/ , $ storeloc ;
}
2010-08-26 19:23:25 +00:00
if ( $ disktype eq 'ide' and $ args { idefull } ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "VM is at capacity for IDE devices, a drive was not added" ] , $ output_handler , $ node ) ;
return ;
} elsif ( $ disktype eq 'scsi' and $ args { scsifull } ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "SCSI Controller at capacity, a drive was not added" ] , $ output_handler , $ node ) ;
return ;
}
2009-09-28 18:25:12 +00:00
$ storeloc =~ s/\/$// ;
2010-02-01 14:11:02 +00:00
( my $ method , my $ location ) = split /:\/\// , $ storeloc , 2 ;
2010-07-09 06:24:18 +00:00
my $ uri = getURI ( $ method , $ location ) ;
#(my $server,my $path) = split/\//,$location,2;
#$server =~ s/:$//; #tolerate habitual colons
#my $servern = inet_aton($server);
#unless ($servern) {
2010-08-06 15:29:07 +00:00
# xCAT::SvrUtils::sendmsg([1,"could not resolve '$server' to an address from vm.storage"]);
2010-07-09 06:24:18 +00:00
# return;
#}
#$server = inet_ntoa($servern);
#my $uri = "nfs://$server/$path";
2009-06-22 16:00:28 +00:00
$ backingif = VirtualDiskFlatVer2BackingInfo - > new ( diskMode = > 'persistent' ,
2010-07-09 19:46:21 +00:00
thinProvisioned = > 1 ,
2010-01-26 15:44:00 +00:00
fileName = > "[" . $ sdmap - > { $ uri } . "]" ) ;
2010-08-26 19:23:25 +00:00
if ( $ disktype eq 'ide' and $ idecontrollerkey == 1 and $ ideunitnum == 0 ) { #reserve a spot for CD
2010-06-07 21:24:22 +00:00
$ ideunitnum = 1 ;
2010-08-26 19:23:25 +00:00
} elsif ( $ disktype eq 'ide' and $ ideunitnum == 2 ) { #go from current to next ide 'controller'
2009-06-22 16:00:28 +00:00
$ idecontrollerkey + + ;
2010-06-07 21:24:22 +00:00
$ ideunitnum = 0 ;
2009-06-22 16:00:28 +00:00
}
2010-03-19 17:19:43 +00:00
unless ( $ disktype eq 'ide' ) {
push @ { $ disktocont { $ scsicontrollerkey } } , $ currkey ;
}
2009-06-22 16:00:28 +00:00
my $ controllerkey ;
if ( $ disktype eq 'ide' ) {
$ controllerkey = $ idecontrollerkey ;
2010-06-17 21:10:39 +00:00
$ unitnum = 0 ;
while ( $ usedideunits { $ unitnum } ) {
$ unitnum + + ;
}
$ usedideunits { $ unitnum } = 1 ;
2009-06-22 16:00:28 +00:00
} else {
$ controllerkey = $ scsicontrollerkey ;
2010-06-17 21:10:39 +00:00
$ unitnum = 0 ;
while ( $ usedscsiunits { $ unitnum } ) {
$ unitnum + + ;
}
$ usedscsiunits { $ unitnum } = 1 ;
2010-03-19 17:19:43 +00:00
$ havescsidevs = 1 ;
2009-06-22 16:00:28 +00:00
}
$ dev = VirtualDisk - > new ( backing = > $ backingif ,
2010-03-19 17:19:43 +00:00
controllerKey = > $ controllerkey ,
2009-06-22 16:00:28 +00:00
key = > $ currkey + + ,
2010-06-07 21:24:22 +00:00
unitNumber = > $ unitnum ,
2009-09-28 17:59:43 +00:00
capacityInKB = > $ disksize ) ;
2009-06-22 16:00:28 +00:00
push @ devs , VirtualDeviceConfigSpec - > new ( device = > $ dev ,
fileOperation = > VirtualDeviceConfigSpecFileOperation - > new ( 'create' ) ,
operation = > VirtualDeviceConfigSpecOperation - > new ( 'add' ) ) ;
}
2009-09-28 18:25:12 +00:00
2009-06-22 16:00:28 +00:00
#It *seems* that IDE controllers never need to be explicitly created, so we skip that step
2010-06-08 13:22:33 +00:00
if ( $ havescsidevs and not $ havescsicontroller ) { #need controllers to attach the disks to
foreach ( 0 .. $ scsicontrollerkey ) {
$ dev = VirtualLsiLogicController - > new ( key = > $ _ ,
device = > \ @ { $ disktocont { $ _ } } ,
sharedBus = > VirtualSCSISharing - > new ( 'noSharing' ) ,
busNumber = > $ _ ) ;
push @ devs , VirtualDeviceConfigSpec - > new ( device = > $ dev ,
operation = > VirtualDeviceConfigSpecOperation - > new ( 'add' ) ) ;
}
}
2009-06-22 16:00:28 +00:00
return @ devs ;
# my $ctlr = VirtualIDEController->new(
}
2009-10-01 14:35:41 +00:00
sub declare_ready {
my % args = % { shift ( ) } ;
$ hypready { $ args { hyp } } = 1 ;
}
2010-08-09 18:43:26 +00:00
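#populate_vcenter_hostviews: bulk-fetch HostSystem views from a vCenter connection and map
#them back to xCAT hypervisor node names, trying hosts.hostnames aliases first when
#usehostnamesforvcenter is enabled, then exact names, then short (domain-stripped) names.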
sub populate_vcenter_hostviews {
my $ vcenter = shift ;
my @ hypervisors ;
my % nametohypmap ;
my $ iterations = 1 ;
if ( $ usehostnamesforvcenter and $ usehostnamesforvcenter !~ /no/i ) {
$ iterations = 2 ; #two passes possible
my $ hyp ;
foreach $ hyp ( keys % { $ vcenterhash { $ vcenter } - > { allhyps } } ) {
if ( $ tablecfg { hosts } - > { $ hyp } - > [ 0 ] - > { hostnames } ) {
$ nametohypmap { $ tablecfg { hosts } - > { $ hyp } - > [ 0 ] - > { hostnames } } = $ hyp ;
}
}
@ hypervisors = keys % nametohypmap ;
} else {
@ hypervisors = keys % { $ vcenterhash { $ vcenter } - > { allhyps } } ;
}
while ( $ iterations and scalar ( @ hypervisors ) ) {
my $ hosts = join ( ")|(" , @ hypervisors ) ;
$ hosts = '^((' . $ hosts . '))(\z|\.)' ;
my $ search = qr/$hosts/ o ;
my @ hypviews = @ { $ vcenterhash { $ vcenter } - > { conn } - > find_entity_views ( view_type = > 'HostSystem' , properties = > [ 'summary.config.name' , 'summary.runtime.connectionState' , 'runtime.inMaintenanceMode' , 'parent' , 'configManager' ] , filter = > { 'summary.config.name' = > $ search } ) } ;
foreach ( @ hypviews ) {
my $ hypname = $ _ - > { 'summary.config.name' } ;
if ( $ vcenterhash { $ vcenter } - > { allhyps } - > { $ hypname } ) { #simplest case, config.name is exactly the same as node name
$ vcenterhash { $ vcenter } - > { hostviews } - > { $ hypname } = $ _ ;
} elsif ( $ nametohypmap { $ hypname } ) { #second case, there is a name mapping this to a real name
$ vcenterhash { $ vcenter } - > { hostviews } - > { $ nametohypmap { $ hypname } } = $ _ ;
} else { #name as-is doesn't work, start stripping domain and hope for the best
$ hypname =~ s/\..*// ;
if ( $ vcenterhash { $ vcenter } - > { allhyps } - > { $ hypname } ) { #shortname is a node
$ vcenterhash { $ vcenter } - > { hostviews } - > { $ hypname } = $ _ ;
} elsif ( $ nametohypmap { $ hypname } ) { #alias for node
$ vcenterhash { $ vcenter } - > { hostviews } - > { $ nametohypmap { $ hypname } } = $ _ ;
}
}
}
$ iterations - - ;
@ hypervisors = ( ) ;
if ( $ usehostnamesforvcenter and $ usehostnamesforvcenter !~ /no/i ) { #check for hypervisors by native node name if missed above
foreach my $ hyp ( keys % { $ vcenterhash { $ vcenter } - > { allhyps } } ) {
unless ( $ vcenterhash { $ vcenter } - > { hostviews } - > { $ hyp } ) {
push @ hypervisors , $ hyp ;
}
}
}
}
}
2009-06-22 16:00:28 +00:00
sub validate_vcenter_prereqs { #Communicate with vCenter and ensure this host is added correctly to a vCenter instance when an operation requires it
my $ hyp = shift ;
my $ depfun = shift ;
my $ depargs = shift ;
my $ vcenter = $ hyphash { $ hyp } - > { vcenter } - > { name } ;
unless ( $ hyphash { $ hyp } - > { vcenter } - > { conn } ) {
2010-01-14 20:32:51 +00:00
eval {
$ hyphash { $ hyp } - > { vcenter } - > { conn } = Vim - > new ( service_url = > "https://$vcenter/sdk" ) ;
$ hyphash { $ hyp } - > { vcenter } - > { conn } - > login ( user_name = > $ hyphash { $ hyp } - > { vcenter } - > { username } , password = > $ hyphash { $ hyp } - > { vcenter } - > { password } ) ;
} ;
if ( $@ ) {
$ hyphash { $ hyp } - > { vcenter } - > { conn } = undef ;
}
2009-06-22 16:00:28 +00:00
}
unless ( $ hyphash { $ hyp } - > { vcenter } - > { conn } ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Unable to reach vCenter server managing $hyp" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
return undef ;
}
my $ foundhyp ;
2009-12-09 21:05:35 +00:00
my $ name = $ hyp ;
if ( $ usehostnamesforvcenter and $ usehostnamesforvcenter !~ /no/i ) {
if ( $ tablecfg { hosts } - > { $ hyp } - > [ 0 ] - > { hostnames } ) {
$ name = $ tablecfg { hosts } - > { $ hyp } - > [ 0 ] - > { hostnames } ;
}
}
2009-06-22 16:00:28 +00:00
my $ connspec = HostConnectSpec - > new (
2009-12-09 21:05:35 +00:00
hostName = > $ name ,
2009-06-22 16:00:28 +00:00
password = > $ hyphash { $ hyp } - > { password } ,
userName = > $ hyphash { $ hyp } - > { username } ,
force = > 1 ,
) ;
2010-07-07 19:58:25 +00:00
my $ hview ;
2010-08-09 18:43:26 +00:00
$ hview = $ vcenterhash { $ vcenter } - > { hostviews } - > { $ hyp } ;
2010-07-07 19:58:25 +00:00
if ( $ hview ) {
if ( $ hview - > { 'summary.config.name' } =~ /^$hyp(?:\.|\z)/ or $ hview - > { 'summary.config.name' } =~ /^$name(?:\.|\z)/ ) { #Looks good, call the dependent function after declaring the state of vcenter to hypervisor as good
if ( $ hview - > { 'summary.runtime.connectionState' } - > val eq 'connected' ) {
enable_vmotion ( hypname = > $ hyp , hostview = > $ hview , conn = > $ hyphash { $ hyp } - > { vcenter } - > { conn } ) ;
2010-08-09 15:12:05 +00:00
$ vcenterhash { $ vcenter } - > { goodhyps } - > { $ hyp } = 1 ;
2009-06-22 16:00:28 +00:00
$ depfun - > ( $ depargs ) ;
2010-07-07 19:58:25 +00:00
if ( $ hview - > parent - > type eq 'ClusterComputeResource' ) { #if it is in a cluster, we can directly remove it
$ hyphash { $ hyp } - > { deletionref } = $ hview - > { mo_ref } ;
} elsif ( $ hview - > parent - > type eq 'ComputeResource' ) { #For some reason, we must delete the container instead
$ hyphash { $ hyp } - > { deletionref } = $ hview - > { parent } ; #save off a reference to delete hostview off just in case
2010-06-14 17:40:06 +00:00
}
2009-06-22 16:00:28 +00:00
return 1 ;
} else {
2009-10-01 20:54:11 +00:00
my $ ref_to_delete ;
2010-07-07 19:58:25 +00:00
if ( $ hview - > parent - > type eq 'ClusterComputeResource' ) { #We are allowed to specifically kill a host in a cluster
$ ref_to_delete = $ hview - > { mo_ref } ;
} elsif ( $ hview - > parent - > type eq 'ComputeResource' ) { #For some reason, we must delete the container instead
$ ref_to_delete = $ hview - > { parent } ;
2009-10-01 20:54:11 +00:00
}
my $ task = $ hyphash { $ hyp } - > { vcenter } - > { conn } - > get_view ( mo_ref = > $ ref_to_delete ) - > Destroy_Task ( ) ;
2009-06-22 16:00:28 +00:00
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & addhosttovcenter ;
$ running_tasks { $ task } - > { conn } = $ hyphash { $ hyp } - > { vcenter } - > { conn } ;
2010-07-07 19:58:25 +00:00
$ running_tasks { $ task } - > { data } = { depfun = > $ depfun , depargs = > $ depargs , conn = > $ hyphash { $ hyp } - > { vcenter } - > { conn } , connspec = > $ connspec , hostview = > $ hview , hypname = > $ hyp , vcenter = > $ vcenter } ;
2009-06-22 16:00:28 +00:00
return undef ;
#The rest would be shorter/ideal, but seems to be confused a lot by stateless
#Maybe in a future VMWare technology level the following would work better
#than it does today
2010-07-07 19:58:25 +00:00
# my $task = $hview_->ReconnectHost_Task(cnxSpec=>$connspec);
# my $task = $hview->DisconnectHost_Task();
2009-06-22 16:00:28 +00:00
# $running_tasks{$task}->{task} = $task;
# $running_tasks{$task}->{callback} = \&disconnecthost_callback;
# $running_tasks{$task}->{conn} = $hyphash{$hyp}->{vcenter}->{conn};
2010-07-07 19:58:25 +00:00
# $running_tasks{$task}->{data} = { depfun => $depfun, depargs => $depargs, conn=> $hyphash{$hyp}->{vcenter}->{conn}, connspec=>$connspec,hostview=>$hview,hypname=>$hyp,vcenter=>$vcenter };
2009-06-22 16:00:28 +00:00
#ADDHOST
}
}
}
#If still in function, haven't found any likely host entries, make a new one
2010-06-14 17:40:06 +00:00
unless ( $ hyphash { $ hyp } - > { offline } ) {
eval {
$ hyphash { $ hyp } - > { conn } = Vim - > new ( service_url = > "https://$hyp/sdk" ) ; #Direct connect to install/check licenses
$ hyphash { $ hyp } - > { conn } - > login ( user_name = > $ hyphash { $ hyp } - > { username } , password = > $ hyphash { $ hyp } - > { password } ) ;
} ;
if ( $@ ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Failed to communicate with $hyp" ] , $ output_handler ) ;
2010-06-14 17:40:06 +00:00
$ hyphash { $ hyp } - > { conn } = undef ;
return "failed" ;
}
validate_licenses ( $ hyp ) ;
2010-01-14 20:32:51 +00:00
}
2009-06-22 16:00:28 +00:00
addhosttovcenter ( undef , {
depfun = > $ depfun ,
depargs = > $ depargs ,
conn = > $ hyphash { $ hyp } - > { vcenter } - > { conn } ,
connspec = > $ connspec ,
hypname = > $ hyp ,
vcenter = > $ vcenter ,
} ) ;
}
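#addhosttovcenter: add the hypervisor to vCenter, either into its configured cluster
#(hypervisor.cluster) or as a standalone host under the datacenter's host folder;
#connecthost_callback completes the connection handling.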
sub addhosttovcenter {
my $ task = shift ;
my $ args = shift ;
my $ hyp = $ args - > { hypname } ;
my $ depfun = $ args - > { depfun } ;
my $ depargs = $ args - > { depargs } ;
my $ connspec = $ args - > { connspec } ;
my $ vcenter = $ args - > { vcenter } ;
if ( $ task ) {
my $ state = $ task - > info - > state - > val ;
if ( $ state eq 'error' ) {
die ;
}
}
2010-06-14 17:40:06 +00:00
if ( $ hyphash { $ args - > { hypname } } - > { offline } ) { #let it stay offline
$ hypready { $ args - > { hypname } } = 1 ; #declare readiness
#enable_vmotion(hypname=>$args->{hypname},hostview=>$args->{hostview},conn=>$args->{conn});
2010-08-09 15:12:05 +00:00
$ vcenterhash { $ args - > { vcenter } } - > { goodhyps } - > { $ args - > { hypname } } = 1 ;
2010-06-14 17:40:06 +00:00
if ( defined $ args - > { depfun } ) { #If a function is waiting for the host connect to go valid, call it
$ args - > { depfun } - > ( $ args - > { depargs } ) ;
}
return ;
}
2009-10-01 14:35:41 +00:00
if ( $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { cluster } ) {
my $ cluster = get_clusterview ( clustname = > $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { cluster } , conn = > $ hyphash { $ hyp } - > { vcenter } - > { conn } ) ;
unless ( $ cluster ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { cluster } . " is not a known cluster to the vCenter server." ] , $ output_handler ) ;
2009-10-01 14:35:41 +00:00
$ hypready { $ hyp } = - 1 ; #Declare impossiblility to be ready
return ;
}
$ task = $ cluster - > AddHost_Task ( spec = > $ connspec , asConnected = > 1 ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & connecthost_callback ;
$ running_tasks { $ task } - > { conn } = $ hyphash { $ hyp } - > { vcenter } - > { conn } ;
$ running_tasks { $ task } - > { data } = { depfun = > $ depfun , depargs = > $ depargs , conn = > $ hyphash { $ hyp } - > { vcenter } - > { conn } , connspec = > $ connspec , cluster = > $ cluster , hypname = > $ hyp , vcenter = > $ vcenter } ;
} else {
my $ datacenter = validate_datacenter_prereqs ( $ hyp ) ;
my $ hfolder = $ datacenter - > hostFolder ; #$hyphash{$hyp}->{vcenter}->{conn}->find_entity_view(view_type=>'Datacenter',properties=>['hostFolder'])->hostFolder;
$ hfolder = $ hyphash { $ hyp } - > { vcenter } - > { conn } - > get_view ( mo_ref = > $ hfolder ) ;
$ task = $ hfolder - > AddStandaloneHost_Task ( spec = > $ connspec , addConnected = > 1 ) ;
$ running_tasks { $ task } - > { task } = $ task ;
$ running_tasks { $ task } - > { callback } = \ & connecthost_callback ;
$ running_tasks { $ task } - > { conn } = $ hyphash { $ hyp } - > { vcenter } - > { conn } ;
$ running_tasks { $ task } - > { data } = { depfun = > $ depfun , depargs = > $ depargs , conn = > $ hyphash { $ hyp } - > { vcenter } - > { conn } , connspec = > $ connspec , foldview = > $ hfolder , hypname = > $ hyp , vcenter = > $ vcenter } ;
}
2009-06-22 16:00:28 +00:00
#print Dumper @{$hyphash{$hyp}->{vcenter}->{conn}->find_entity_views(view_type=>'HostSystem',properties=>['runtime.connectionState'])};
}
2009-07-13 18:04:39 +00:00
sub validate_datacenter_prereqs {
my ( $ hyp ) = @ _ ;
my $ datacenter = $ hyphash { $ hyp } - > { vcenter } - > { conn } - > find_entity_view ( view_type = > 'Datacenter' , properties = > [ 'hostFolder' ] ) ;
if ( ! defined $ datacenter ) {
my $ vconn = $ hyphash { $ hyp } - > { vcenter } - > { conn } ;
my $ root_folder = $ vconn - > get_view ( mo_ref = > $ vconn - > get_service_content ( ) - > rootFolder ) ;
$ root_folder - > CreateDatacenter ( name = > 'xcat-datacenter' ) ;
$ datacenter = $ hyphash { $ hyp } - > { vcenter } - > { conn } - > find_entity_view ( view_type = > 'Datacenter' , properties = > [ 'hostFolder' ] ) ;
}
return $ datacenter ;
}
2009-07-17 20:09:05 +00:00
sub get_default_switch_for_hypervisor {
#This will make sure the default, implicit switch is in order in accordance
#with the configuration. If nothing specified, it just spits out vSwitch0
#if something specified, make sure it exists
#if it doesn't exist, and the syntax explains how to build it, build it
#return undef if something is specified, doesn't exist, and lacks instruction
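#Illustrative (hypothetical) hypervisor.defaultnet values, per the parsing below:
#  "vSwitch1"                 -> use an existing vSwitch1, or give up if it does not exist
#  "vSwitch1=vmnic2&vmnic3"   -> create vSwitch1 over vmnic2 and vmnic3 if it is missing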
my $ hyp = shift ;
my $ defswitch = 'vSwitch0' ;
my $ switchmembers ;
if ( $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { defaultnet } ) {
$ defswitch = $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { defaultnet } ;
( $ defswitch , $ switchmembers ) = split /=/ , $ defswitch , 2 ;
my $ vswitch ;
my $ hostview = $ hyphash { $ hyp } - > { hostview } ;
foreach $ vswitch ( @ { $ hostview - > config - > network - > vswitch } ) {
if ( $ vswitch - > name eq $ defswitch ) {
return $ defswitch ;
}
}
#If still here, means we need to build the switch
unless ( $ switchmembers ) { return undef ; } #No hope, no idea how to make it
return create_vswitch ( $ hyp , $ defswitch , split ( /&/ , $ switchmembers ) ) ;
} else {
return 'vSwitch0' ;
}
}
2009-07-16 20:43:36 +00:00
sub get_switchname_for_portdesc {
#This function will examine all current switches to find or create a switch matching the described requirement
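#Illustrative (hypothetical) hypervisor.netmap value, per the parsing below:
#  "prodnet=vmnic1&vmnic2,mgtnet=vmnic0"
#A port description of "prodnet" would then resolve to a vswitch named "prodnet" backed by
#vmnic1 and vmnic2; with no matching netmap entry the name defaults to "vsw".$portdesc.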
2009-07-14 20:43:59 +00:00
my $ hyp = shift ;
2009-07-16 20:43:36 +00:00
my $ portdesc = shift ;
my $ description ; #actual name to use for the virtual switch
if ( $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { netmap } ) {
foreach ( split /,/ , $ tablecfg { hypervisor } - > { $ hyp } - > [ 0 ] - > { netmap } ) {
if ( /^$portdesc=/ ) {
( $ description , $ portdesc ) = split /=/ , $ _ , 2 ;
last ;
}
}
} else {
$ description = 'vsw' . $ portdesc ;
}
unless ( $ description ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Invalid format for hypervisor.netmap detected for $hyp" ] , $ output_handler ) ;
2009-07-16 20:43:36 +00:00
return undef ;
}
my % requiredports ;
my % portkeys ;
foreach ( split /&/ , $ portdesc ) {
$ requiredports { $ _ } = 1 ;
}
2009-07-14 20:43:59 +00:00
my $ hostview = $ hyphash { $ hyp } - > { hostview } ;
unless ( $ hostview ) {
2009-07-14 20:50:13 +00:00
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hyphash { $ hyp } - > { conn } ) ; #,properties=>['config','configManager']);
2009-07-14 20:43:59 +00:00
$ hostview = $ hyphash { $ hyp } - > { hostview } ;
}
2009-07-16 20:43:36 +00:00
foreach ( @ { $ hostview - > config - > network - > pnic } ) {
if ( $ requiredports { $ _ - > device } ) { #We establish lookups both ways
$ portkeys { $ _ - > key } = $ _ - > device ;
delete $ requiredports { $ _ - > device } ;
}
}
if ( keys % requiredports ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ":Unable to locate the following nics on $hyp: " . join ( ',' , keys % requiredports ) ] , $ output_handler ) ;
2009-07-16 20:43:36 +00:00
return undef ;
}
my $ foundmatchswitch ;
my $ cfgmismatch = 0 ;
my $ vswitch ;
foreach $ vswitch ( @ { $ hostview - > config - > network - > vswitch } ) {
$ cfgmismatch = 0 ; #new switch, no sign of mismatch
foreach ( @ { $ vswitch - > pnic } ) {
if ( $ portkeys { $ _ } ) {
$ foundmatchswitch = $ vswitch - > name ;
delete $ requiredports { $ portkeys { $ _ } } ;
delete $ portkeys { $ _ } ;
} else {
$ cfgmismatch = 1 ; #If this turns out to have anything, it is bad
}
}
if ( $ foundmatchswitch ) { last ; }
}
if ( $ foundmatchswitch ) {
if ( $ cfgmismatch ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Aggregation mismatch detected, request nic is aggregated with a nic not requested" ] , $ output_handler ) ;
2009-07-16 20:43:36 +00:00
return undef ;
}
unless ( keys % portkeys ) {
return $ foundmatchswitch ;
}
die "TODO: add physical nics to aggregation if requested" ;
} else {
return create_vswitch ( $ hyp , $ description , values % portkeys ) ;
}
die "impossible occurance" ;
return undef ;
}
sub create_vswitch {
my $ hyp = shift ;
my $ description = shift ;
my @ ports = @ _ ;
my $ vswitch = HostVirtualSwitchBondBridge - > new (
nicDevice = > \ @ ports
) ;
my $ vswspec = HostVirtualSwitchSpec - > new (
bridge = > $ vswitch ,
2009-07-17 14:18:25 +00:00
mtu = > 1500 ,
2009-07-16 20:43:36 +00:00
numPorts = > 64
) ;
my $ hostview = $ hyphash { $ hyp } - > { hostview } ;
my $ netman = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ hostview - > configManager - > networkSystem ) ;
$ netman - > AddVirtualSwitch (
vswitchName = > $ description ,
spec = > $ vswspec
) ;
2009-07-17 14:18:25 +00:00
return $ description ;
2009-07-14 20:43:59 +00:00
}
2009-06-22 16:00:28 +00:00
sub validate_network_prereqs {
my $ nodes = shift ;
my $ hyp = shift ;
my $ hypconn = $ hyphash { $ hyp } - > { conn } ;
my $ hostview = $ hyphash { $ hyp } - > { hostview } ;
if ( $ hostview ) {
$ hostview - > update_view_data ( ) ; #pull in changes induced by previous activity
} else {
2009-07-14 20:50:13 +00:00
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hyphash { $ hyp } - > { conn } ) ; #,properties=>['config','configManager','network']);
2009-06-22 16:00:28 +00:00
$ hostview = $ hyphash { $ hyp } - > { hostview } ;
}
my $ node ;
my $ method ;
my $ location ;
if ( defined $ hostview - > { network } ) {
foreach ( @ { $ hostview - > network } ) {
my $ nvw = $ hypconn - > get_view ( mo_ref = > $ _ ) ;
if ( defined $ nvw - > name ) {
$ hyphash { $ hyp } - > { nets } - > { $ nvw - > name } = $ _ ;
}
}
}
foreach $ node ( @$ nodes ) {
my @ networks = split /,/ , $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { nics } ;
foreach ( @ networks ) {
2009-07-17 20:09:05 +00:00
my $ switchname = get_default_switch_for_hypervisor ( $ hyp ) ;
2009-07-17 14:18:25 +00:00
my $ tabval = $ _ ;
2009-07-17 20:09:05 +00:00
my $ pgname ;
s/=.*// ; #TODO specify nic model with <blah>=model
if ( /:/ ) { #The config specifies a particular path in some way
2009-07-17 14:18:25 +00:00
s/(.*):// ;
2009-07-16 20:43:36 +00:00
$ switchname = get_switchname_for_portdesc ( $ hyp , $ 1 ) ;
2009-07-17 20:09:05 +00:00
$ pgname = $ switchname . "-" . $ _ ;
} else { #Use the default vswitch per table config to connect this through, use the same name we did before to maintain compatibility
$ pgname = $ _ ;
2009-07-14 20:43:59 +00:00
}
2009-06-22 16:00:28 +00:00
my $ netname = $ _ ;
my $ netsys ;
2009-07-17 14:18:25 +00:00
$ hyphash { $ hyp } - > { pgnames } - > { $ tabval } = $ pgname ;
2009-06-22 16:00:28 +00:00
my $ policy = HostNetworkPolicy - > new ( ) ;
2009-07-17 14:18:25 +00:00
unless ( $ hyphash { $ hyp } - > { nets } - > { $ pgname } ) {
2009-06-22 16:00:28 +00:00
my $ vlanid ;
if ( $ netname =~ /trunk/ ) {
$ vlanid = 4095 ;
} elsif ( $ netname =~ /vl(an)?(\d+)$/ ) {
$ vlanid = $ 2 ;
} else {
$ vlanid = 0 ;
}
my $ hostgroupdef = HostPortGroupSpec - > new (
2009-07-17 14:18:25 +00:00
name = > $ pgname ,
2009-06-22 16:00:28 +00:00
vlanId = > $ vlanid ,
policy = > $ policy ,
vswitchName = > $ switchname
) ;
unless ( $ netsys ) {
$ netsys = $ hyphash { $ hyp } - > { conn } - > get_view ( mo_ref = > $ hostview - > configManager - > networkSystem ) ;
}
$ netsys - > AddPortGroup ( portgrp = > $ hostgroupdef ) ;
#$hyphash{$hyp}->{nets}->{$netname}=1;
2010-08-05 14:49:21 +00:00
while ( ( not defined $ hyphash { $ hyp } - > { nets } - > { $ pgname } ) and sleep 1 ) { #we will only sleep if we know we are still waiting for something
$ hostview - > update_view_data ( ) ; #pull in changes induced by previous activity
if ( defined $ hostview - > { network } ) { #We load the new object references
foreach ( @ { $ hostview - > network } ) {
my $ nvw = $ hypconn - > get_view ( mo_ref = > $ _ ) ;
if ( defined $ nvw - > name ) {
$ hyphash { $ hyp } - > { nets } - > { $ nvw - > name } = $ _ ;
}
2009-06-22 16:00:28 +00:00
}
}
2010-08-05 14:49:21 +00:00
} #end while loop
2009-06-22 16:00:28 +00:00
}
}
}
return 1 ;
}
sub validate_datastore_prereqs {
my $ nodes = shift ;
my $ hyp = shift ;
2010-06-08 18:14:04 +00:00
my $ newdatastores = shift ; # a hash reference of URLs to afflicted nodes outside of table space
2009-06-22 16:00:28 +00:00
my $ hypconn = $ hyphash { $ hyp } - > { conn } ;
my $ hostview = $ hyphash { $ hyp } - > { hostview } ;
unless ( $ hostview ) {
2009-07-17 14:18:25 +00:00
$ hyphash { $ hyp } - > { hostview } = get_hostview ( hypname = > $ hyp , conn = > $ hypconn ) ; #,properties=>['config','configManager']);
2009-06-22 16:00:28 +00:00
$ hostview = $ hyphash { $ hyp } - > { hostview } ;
}
my $ node ;
my $ method ;
my $ location ;
2010-07-09 06:24:18 +00:00
# get all of the datastores that are currently available on this node.
# and put them into a hash
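# The maps built below are keyed by a canonical URL, e.g. (illustrative values):
#   $hyphash{$hyp}->{datastoremap}->{"nfs://172.16.0.1/vms"}    = <datastore label>
#   $hyphash{$hyp}->{datastorerefmap}->{"nfs://172.16.0.1/vms"} = <managed object reference>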
2009-06-22 16:00:28 +00:00
if ( defined $ hostview - > { datastore } ) { # only iterate if it exists
foreach ( @ { $ hostview - > datastore } ) {
my $ dsv = $ hypconn - > get_view ( mo_ref = > $ _ ) ;
if ( defined $ dsv - > info - > { nas } ) {
if ( $ dsv - > info - > nas - > type eq 'NFS' ) {
2010-01-26 15:44:00 +00:00
my $ mnthost = inet_aton ( $ dsv - > info - > nas - > remoteHost ) ;
if ( $ mnthost ) {
$ mnthost = inet_ntoa ( $ mnthost ) ;
} else {
$ mnthost = $ dsv - > info - > nas - > remoteHost ;
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to resolve VMware specified host '" . $ dsv - > info - > nas - > remoteHost . "' to an address, problems may occur" ] , $ output_handler ) ;
2010-01-26 15:44:00 +00:00
}
$ hyphash { $ hyp } - > { datastoremap } - > { "nfs://" . $ mnthost . $ dsv - > info - > nas - > remotePath } = $ dsv - > info - > name ;
2010-06-08 18:14:04 +00:00
$ hyphash { $ hyp } - > { datastorerefmap } - > { "nfs://" . $ mnthost . $ dsv - > info - > nas - > remotePath } = $ _ ;
2009-06-22 16:00:28 +00:00
} #TODO: care about SMB
2010-07-09 06:24:18 +00:00
} elsif ( defined $ dsv - > info - > { vmfs } ) {
my $ name = $ dsv - > info - > vmfs - > name ;
$ hyphash { $ hyp } - > { datastoremap } - > { "vmfs://" . $ name } = $ dsv - > info - > name ;
$ hyphash { $ hyp } - > { datastorerefmap } - > { "vmfs://" . $ name } = $ _ ;
}
2009-06-22 16:00:28 +00:00
}
}
2010-01-07 18:20:29 +00:00
my $ refresh_names = 0 ;
2010-07-09 06:24:18 +00:00
# now go through the nodes and make sure that we have matching datastores.
# E.g.: if it's NFS, then mount it (if not mounted)
# E.g.: if it's VMFS, then create it if not created already. Note: VMFS will persist across
# machine reboots, unless it's destroyed by being overwritten.
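# Illustrative (hypothetical) vm.storage/vm.cfgstore values handled by the loop below:
#   nfs://172.16.0.1/vms   -> mounted as an NFS datastore if not already present
#   vmfs://datastore1      -> created as a local VMFS datastore if not already present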
2009-06-22 16:00:28 +00:00
foreach $ node ( @$ nodes ) {
my @ storage = split /,/ , $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { storage } ;
2009-07-16 20:43:36 +00:00
if ( $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { cfgstore } ) {
push @ storage , $ tablecfg { vm } - > { $ node } - > [ 0 ] - > { cfgstore } ;
}
2010-06-08 18:14:04 +00:00
foreach ( @ storage ) { #TODO: merge this with foreach loop below. Here we could build onto $newdatastores instead, for faster operation at scale
2010-03-19 17:19:43 +00:00
s/=.*// ; #remove device type information from configuration
2009-06-22 16:00:28 +00:00
s/\/$// ; #Strip trailing slash if specified, to align to VMware semantics
if ( /:\/\// ) {
( $ method , $ location ) = split /:\/\// , $ _ , 2 ;
2010-07-09 06:24:18 +00:00
if ( $ method =~ /nfs/ ) {
# go through and see if NFS is mounted, if not, then mount it.
( my $ server , my $ path ) = split /\// , $ location , 2 ;
$ server =~ s/:$// ; #remove a : if someone put it in out of nfs mount habit
my $ servern = inet_aton ( $ server ) ;
unless ( $ servern ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Unable to resolve '$server' to an address, check vm.cfgstore/vm.storage" ] , $ output_handler ) ;
2010-07-09 06:24:18 +00:00
return 0 ;
}
$ server = inet_ntoa ( $ servern ) ;
my $ uri = "nfs://$server/$path" ;
unless ( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } ) { #If not already there, must mount it
$ refresh_names = 1 ;
( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } , $ hyphash { $ hyp } - > { datastorerefmap } - > { $ uri } ) = mount_nfs_datastore ( $ hostview , $ location ) ;
}
} elsif ( $ method =~ /vmfs/ ) {
( my $ name , undef ) = split /\// , $ location , 2 ;
$ name =~ s/:$// ; #remove a : if someone put it in for some reason.
my $ uri = "vmfs://$name" ;
# check and see if this vmfs is on the node.
unless ( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } ) { #If not already there, try creating it.
( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } , $ hyphash { $ hyp } - > { datastorerefmap } - > { $ uri } ) = create_vmfs_datastore ( $ hostview , $ name ) ;
}
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": $method is unsupported at this time (nfs would be)" ] , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
return 0 ;
}
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": $_ not supported storage specification for ESX plugin,\n\t'nfs://<server>/<path>'\n\t\tor\n\t'vmfs://<vmfs>'\n only currently supported vm.storage supported for ESX at the moment" ] , $ output_handler , $ node ) ;
2009-06-22 16:00:28 +00:00
return 0 ;
} #TODO: raw device mapping, VMFS via iSCSI, VMFS via FC?
}
}
2010-07-09 06:24:18 +00:00
# newdatastores are for migrations or changing vms.
# TODO: make this work for VMFS. Right now only NFS.
2010-06-08 18:14:04 +00:00
if ( ref $ newdatastores ) {
foreach ( keys %$ newdatastores ) {
2010-08-31 20:53:55 +00:00
my $ origurl = $ _ ;
2010-06-08 18:14:04 +00:00
s/\/$// ; #Strip trailing slash if specified, to align to VMware semantics
if ( /:\/\// ) {
( $ method , $ location ) = split /:\/\// , $ _ , 2 ;
( my $ server , my $ path ) = split /\// , $ location , 2 ;
$ server =~ s/:$// ; #remove a : if someone put it in out of nfs mount habit
my $ servern = inet_aton ( $ server ) ;
unless ( $ servern ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": Unable to resolve '$server' to an address, check vm.cfgstore/vm.storage" ] , $ output_handler ) ;
2010-06-08 18:14:04 +00:00
return 0 ;
}
$ server = inet_ntoa ( $ servern ) ;
my $ uri = "nfs://$server/$path" ;
unless ( $ method =~ /nfs/ ) {
foreach ( @ { $ newdatastores - > { $ _ } } ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": $method is unsupported at this time (nfs would be)" ] , $ output_handler , $ _ ) ;
2010-06-08 18:14:04 +00:00
}
return 0 ;
}
unless ( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } ) { #If not already there, must mount it
$ refresh_names = 1 ;
( $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } , $ hyphash { $ hyp } - > { datastorerefmap } - > { $ uri } ) = mount_nfs_datastore ( $ hostview , $ location ) ;
}
2010-08-31 20:53:55 +00:00
$ hyphash { $ hyp } - > { datastoremap } - > { $ origurl } = $ hyphash { $ hyp } - > { datastoremap } - > { $ uri } ;
$ hyphash { $ hyp } - > { datastorerefmap } - > { $ origurl } = $ hyphash { $ hyp } - > { datastorerefmap } - > { $ uri } ;
2010-06-08 18:14:04 +00:00
} else {
2010-08-31 20:53:55 +00:00
my $ datastore = $ _ ;
2010-06-08 18:14:04 +00:00
foreach ( @ { $ newdatastores - > { $ _ } } ) {
2010-08-31 20:53:55 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , ": $datastore not supported storage specification for ESX plugin, 'nfs://<server>/<path>' only currently supported vm.storage supported for ESX at the moment" ] , $ output_handler , $ _ ) ;
2010-06-08 18:14:04 +00:00
}
return 0 ;
} #TODO: raw device mapping, VMFS via iSCSI, VMFS via FC?
}
}
2010-01-07 18:20:29 +00:00
if ( $ refresh_names ) { #if we are in a vcenter context, vmware can rename a datastore behind our backs immediately after adding
$ hostview - > update_view_data ( ) ;
if ( defined $ hostview - > { datastore } ) { # only iterate if it exists
foreach ( @ { $ hostview - > datastore } ) {
my $ dsv = $ hypconn - > get_view ( mo_ref = > $ _ ) ;
if ( defined $ dsv - > info - > { nas } ) {
if ( $ dsv - > info - > nas - > type eq 'NFS' ) {
2010-01-26 15:44:00 +00:00
my $ mnthost = inet_aton ( $ dsv - > info - > nas - > remoteHost ) ;
if ( $ mnthost ) {
$ mnthost = inet_ntoa ( $ mnthost ) ;
} else {
$ mnthost = $ dsv - > info - > nas - > remoteHost ;
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to resolve VMware specified host '" . $ dsv - > info - > nas - > remoteHost . "' to an address, problems may occur" ] , $ output_handler ) ;
2010-01-26 15:44:00 +00:00
}
$ hyphash { $ hyp } - > { datastoremap } - > { "nfs://" . $ mnthost . $ dsv - > info - > nas - > remotePath } = $ dsv - > info - > name ;
2010-06-08 18:14:04 +00:00
$ hyphash { $ hyp } - > { datastorerefmap } - > { "nfs://" . $ mnthost . $ dsv - > info - > nas - > remotePath } = $ _ ;
2010-01-07 18:20:29 +00:00
} #TODO: care about SMB
} #TODO: care about VMFS
}
}
}
2009-06-22 16:00:28 +00:00
return 1 ;
}
2009-07-13 18:04:39 +00:00
sub getlabel_for_datastore {
my $ method = shift ;
2009-06-22 16:00:28 +00:00
my $ location = shift ;
2009-07-13 18:04:39 +00:00
2009-06-22 16:00:28 +00:00
$ location =~ s/\//_/g ;
2009-07-13 18:04:39 +00:00
$ location = $ method . '_' . $ location ;
2009-07-10 18:55:58 +00:00
#VMware has a 42 character limit, we will start mangling to get under 42.
#Will try to preserve as much informative detail as possible, hence several conditionals instead of taking the easy way out
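#For example, a hypothetical "nfs_172.16.10.1_install_vm_images_for_cluster_a" label would be
#shortened step by step (drop the "nfs_" prefix, then the dots, then the underscores, then
#collapse the middle to "..") only as far as needed to fit within 42 characters.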
if ( length ( $ location ) > 42 ) {
$ location =~ s/nfs_// ; #Ditch unique names for different protocols to the same path, seems unbelievably unlikely
}
if ( length ( $ location ) > 42 ) {
$ location =~ s/\.//g ; #Next, ditch host delimiter, it is unlikely that hosts will have unique names if their dots are removed
}
if ( length ( $ location ) > 42 ) {
$ location =~ s/_//g ; #Next, ditch path delimiter, it is unlikely that two paths will happen to look the same without delimiters
}
if ( length ( $ location ) > 42 ) { #finally, replace the middle with ellipsis
substr ( $ location , 20 , - 20 , '..' ) ;
}
2009-07-13 18:04:39 +00:00
return $ location ;
}
sub mount_nfs_datastore {
my $ hostview = shift ;
my $ location = shift ;
my $ server ;
my $ path ;
( $ server , $ path ) = split /\// , $ location , 2 ;
$ location = getlabel_for_datastore ( 'nfs' , $ location ) ;
2009-07-10 18:55:58 +00:00
2009-06-22 16:00:28 +00:00
my $ nds = HostNasVolumeSpec - > new ( accessMode = > 'readWrite' ,
remoteHost = > $ server ,
localPath = > $ location ,
remotePath = > "/" . $ path ) ;
my $ dsmv = $ hostview - > { vim } - > get_view ( mo_ref = > $ hostview - > configManager - > datastoreSystem ) ;
2009-11-23 23:11:16 +00:00
2010-06-08 18:14:04 +00:00
my $ dsref ;
2009-11-23 23:11:16 +00:00
eval {
2010-06-08 18:14:04 +00:00
$ dsref = $ dsmv - > CreateNasDatastore ( spec = > $ nds ) ;
2009-11-23 23:11:16 +00:00
} ;
if ( $@ ) {
die "$@" unless $@ =~ m/Fault detail: DuplicateNameFault/ ;
die "esx plugin: a datastore was discovered with the same name referring to a different nominatum- cannot continue\n$@"
unless & match_nfs_datastore ( $ server , "/$path" , $ hostview - > { vim } ) ;
}
2010-06-08 18:14:04 +00:00
return ( $ location , $ dsref ) ;
2009-06-22 16:00:28 +00:00
}
2010-07-09 06:24:18 +00:00
# create a VMFS data store on a node so that VMs can live locally instead of NFS
sub create_vmfs_datastore {
my $ hostview = shift ; # VM object
my $ name = shift ; # name of storage we wish to create.
# use the host's datastoreSystem VMware API to create the VMFS datastore on the first available disk
my $ hdss = $ hostview - > { vim } - > get_view ( mo_ref = > $ hostview - > configManager - > datastoreSystem ) ;
my $ diskList = $ hdss - > QueryAvailableDisksForVmfs ( ) ;
my $ count = scalar ( @$ diskList ) ; # get the number of disks available for formatting.
unless ( $ count > 0 ) {
die "No disks are available to create VMFS volume for $name" ;
}
foreach my $ disk ( @$ diskList ) {
my $ options = $ hdss - > QueryVmfsDatastoreCreateOptions ( devicePath = > $ disk - > devicePath ) ;
@$ options [ 0 ] - > spec - > vmfs - > volumeName ( $ name ) ;
my $ newDatastore = $ hdss - > CreateVmfsDatastore ( spec = > @$ options [ 0 ] - > spec ) ;
#return $newDatastore;
# create it on the first disk we see.
return ( $ name , $ newDatastore ) ;
}
return 0 ;
}
2009-06-22 16:00:28 +00:00
sub build_more_info {
die ( "TODO: fix this function if called" ) ;
print "Does this acually get called????**********************************\n" ;
my $ noderange = shift ;
my $ callback = shift ;
my $ vmtab = xCAT::Table - > new ( "vm" ) ;
my @ moreinfo = ( ) ;
unless ( $ vmtab ) {
$ callback - > ( { data = > [ "Cannot open mp table" ] } ) ;
return @ moreinfo ;
}
my % mpa_hash = ( ) ;
foreach my $ node ( @$ noderange ) {
my $ ent = $ vmtab - > getNodeAttribs ( $ node , [ 'mpa' , 'id' ] ) ;
if ( defined ( $ ent - > { mpa } ) ) { push @ { $ mpa_hash { $ ent - > { mpa } } { nodes } } , $ node ; }
else {
$ callback - > ( { data = > [ "no mpa defined for node $node" ] } ) ;
return @ moreinfo ;
}
if ( defined ( $ ent - > { id } ) ) { push @ { $ mpa_hash { $ ent - > { mpa } } { ids } } , $ ent - > { id } ; }
else { push @ { $ mpa_hash { $ ent - > { mpa } } { ids } } , "" ; }
}
foreach ( keys % mpa_hash ) {
push @ moreinfo , "\[$_\]\[" . join ( ',' , @ { $ mpa_hash { $ _ } { nodes } } ) . "\]\[" . join ( ',' , @ { $ mpa_hash { $ _ } { ids } } ) . "\]" ;
}
return \ @ moreinfo ;
}
sub copycd {
my $ request = shift ;
my $ doreq = shift ;
my $ distname = "" ;
my $ path ;
my $ arch ;
my $ darch ;
my $ installroot ;
$ installroot = "/install" ;
my $ sitetab = xCAT::Table - > new ( 'site' ) ;
if ( $ sitetab ) {
( my $ ref ) = $ sitetab - > getAttribs ( { key = > 'installdir' } , 'value' ) ;
if ( $ ref and $ ref - > { value } ) {
$ installroot = $ ref - > { value } ;
}
}
@ ARGV = @ { $ request - > { arg } } ;
GetOptions (
'n=s' = > \ $ distname ,
'a=s' = > \ $ arch ,
'p=s' = > \ $ path
) ;
# run a few tests to see if the copycds should use this plugin
unless ( $ path ) {
# can't use this plugin because we need a path and you didn't provide one!
return ;
}
if ( $ distname and $ distname !~ /^esx/ ) {
# we're for esx, so if you specified a non-esx distro it's not us!
return ;
}
my $ found = 0 ;
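# Media fingerprints checked below: README+build_number+VMware/+packages.xml => ESX 4.x,
# README+open_source_licenses.txt+VMware/ => ESX 3.5, README.txt+vmkernel.gz => ESXi 4.x,
# vmkernel.gz+isolinux.cfg => ESXi 3.5 (ThinESX)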
if ( - r $ path . "/README" and - r $ path . "/build_number" and - d $ path . "/VMware" and - r $ path . "/packages.xml" ) { #We have a probable new style ESX media
open ( LINE , $ path . "/packages.xml" ) ;
my $ product ;
my $ version ;
while ( <LINE> ) {
if ( /roductLineId>([^<]*)<\/Prod/ ) {
$ product = $ 1 ;
}
if ( /ersion>([^<]*)<\/version/ ) {
$ version = $ 1 ;
$ version =~ s/\.0$// ;
}
if ( /arch>([^>]*)<\/arch/ ) {
unless ( $ darch and $ darch =~ /x86_64/ ) { #prefer to be characterized as x86_64
$ darch = $ 1 ;
$ arch = $ 1 ;
}
}
}
close ( LINE ) ;
if ( $ product and $ version ) {
$ distname = $ product . $ version ;
$ found = 1 ;
}
} elsif ( - r $ path . "/README" and - r $ path . "/open_source_licenses.txt" and - d $ path . "/VMware" ) { #Candidate to be ESX 3.5
open ( LINE , $ path . "/README" ) ;
while ( <LINE> ) {
if ( /VMware ESX Server 3.5\s*$/ ) {
$ darch = 'x86' ;
$ arch = 'x86' ;
$ distname = 'esx3.5' ;
$ found = 1 ;
last ;
}
}
close ( LINE ) ;
} elsif ( - r $ path . "/README.txt" and - r $ path . "/vmkernel.gz" ) {
# it's an esxi dvd!
# if we got here it's probably ESX they want to copy
my $ line ;
my $ darch ;
open ( LINE , $ path . "/README.txt" ) or die "couldn't open!" ;
while ( $ line = <LINE> ) {
chomp ( $ line ) ;
2010-06-15 15:10:45 +00:00
if ( $ line =~ /VMware ESXi(?: version)? 4\.(\d+)/ ) {
2009-09-28 21:26:01 +00:00
$ darch = "x86_64" ;
2009-06-22 16:00:28 +00:00
$ distname = "esxi4" ;
2010-05-10 13:54:48 +00:00
if ( $ 1 ) {
$ distname . = '.' . $ 1 ;
}
2009-06-22 16:00:28 +00:00
$ found = 1 ;
if ( $ arch and $ arch ne $ darch ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Requested distribution architecture $arch, but media is $darch" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
return ;
}
$ arch = $ darch ;
last ; # we found our distro! end this loop madness.
}
}
close ( LINE ) ;
unless ( $ found ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "I don't recognize this VMware ESX DVD" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
return ; # doesn't seem to be a valid DVD or CD
}
} elsif ( - r $ path . "/vmkernel.gz" and - r $ path . "/isolinux.cfg" ) {
open ( LINE , $ path . "/isolinux.cfg" ) ;
while ( <LINE> ) {
if ( /ThinESX Installer/ ) {
$ darch = 'x86' ;
$ arch = 'x86' ;
$ distname = 'esxi3.5' ;
$ found = 1 ;
last ;
}
}
close ( LINE ) ;
}
unless ( $ found ) { return ; } #not our media
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( "Copying media to $installroot/$distname/$arch/" , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
my $ omask = umask 0022 ;
mkpath ( "$installroot/$distname/$arch" ) ;
umask $ omask ;
my $ rc ;
my $ reaped = 0 ;
$ SIG { INT } = $ SIG { TERM } = sub {
foreach ( @ cpiopid ) {
kill 2 , $ _ ;
}
if ( $ ::CDMOUNTPATH ) {
chdir ( "/" ) ;
system ( "umount $::CDMOUNTPATH" ) ;
}
} ;
my $ KID ;
chdir $ path ;
my $ numFiles = `find . -print | wc -l` ;
my $ child = open ( $ KID , "|-" ) ;
unless ( defined $ child )
{
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Media copy operation fork failure" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
return ;
}
if ( $ child )
{
push @ cpiopid , $ child ;
my @ finddata = `find .` ;
for ( @ finddata )
{
print $ KID $ _ ;
}
close ( $ KID ) ;
$ rc = $? ;
}
else
{
nice 10 ;
my $ c = "nice -n 20 cpio -vdump $installroot/$distname/$arch" ;
my $ k2 = open ( PIPE , "$c 2>&1 |" ) ||
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Media copy operation fork failure" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
push @ cpiopid , $ k2 ;
my $ copied = 0 ;
my ( $ percent , $ fout ) ;
while ( <PIPE> ) {
next if /^cpio:/ ;
$ percent = $ copied / $ numFiles ;
$ fout = sprintf "%0.2f%%" , $ percent * 100 ;
$ output_handler - > ( { sinfo = > "$fout" } ) ;
+ + $ copied ;
}
exit ;
}
# let everyone read it
#chdir "/tmp";
chmod 0755 , "$installroot/$distname/$arch" ;
if ( $ rc != 0 ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Media copy operation failed, status $rc" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( "Media copy operation successful" , $ output_handler ) ;
2009-09-19 17:03:14 +00:00
my @ ret = xCAT::SvrUtils - > update_tables_with_templates ( $ distname , $ arch ) ;
if ( $ ret [ 0 ] != 0 ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( "Error when updating the osimage tables: " . $ ret [ 1 ] , $ output_handler ) ;
2009-09-19 17:03:14 +00:00
}
2009-06-22 16:00:28 +00:00
}
}
sub makecustomizedmod {
my $ osver = shift ;
my $ dest = shift ;
2010-08-12 22:31:31 +00:00
# if it already exists, do not overwrite it because it may be someone
# else's custom image
if ( - f "$dest/mod.tgz" ) { return 1 ; }
2009-06-22 16:00:28 +00:00
my $ passtab = xCAT::Table - > new ( 'passwd' ) ;
my $ tmp ;
my $ password ;
if ( $ passtab ) {
( $ tmp ) = $ passtab - > getAttribs ( { 'key' = > 'vmware' } , 'username' , 'password' ) ;
if ( defined ( $ tmp ) ) {
$ password = $ tmp - > { password } ;
}
}
2010-01-07 20:03:15 +00:00
unless ( $ password ) {
return 0 ;
}
mkpath ( "/tmp/xcat" ) ;
my $ tempdir = tempdir ( "/tmp/xcat/esxmodcustXXXXXXXX" ) ;
my $ shadow ;
mkpath ( $ tempdir . "/etc/" ) ;
2010-09-16 13:41:16 +00:00
my $ oldmask = umask ( 0077 ) ;
2010-01-07 20:03:15 +00:00
open ( $ shadow , ">" , $ tempdir . "/etc/shadow" ) ;
2009-06-22 16:00:28 +00:00
$ password = crypt ( $ password , '$1$' . xCAT::Utils:: genpassword ( 8 ) ) ;
my $ dayssince1970 = int ( time ( ) / 86400 ) ; #Be truthful about /etc/shadow
my @ otherusers = qw/nobody nfsnobody dcui daemon vimuser/ ;
print $ shadow "root:$password:$dayssince1970:0:99999:7:::\n" ;
foreach ( @ otherusers ) {
print $ shadow "$_:*:$dayssince1970:0:99999:7:::\n" ;
}
close ( $ shadow ) ;
2010-09-16 13:41:16 +00:00
umask ( $ oldmask ) ;
2010-08-19 13:15:57 +00:00
if ( - e "$::XCATROOT/share/xcat/netboot/esxi/38.xcat-enableipv6" ) {
mkpath ( $ tempdir . "/etc/vmware/init/init.d" ) ;
copy ( "$::XCATROOT/share/xcat/netboot/esxi/38.xcat-enableipv6" , $ tempdir . "/etc/vmware/init/init.d/38.xcat-enableipv6" ) ;
}
2010-05-15 01:02:30 +00:00
if ( - e "$::XCATROOT/share/xcat/netboot/esxi/47.xcat-networking" ) {
2010-05-15 01:32:13 +00:00
copy ( "$::XCATROOT/share/xcat/netboot/esxi/47.xcat-networking" , $ tempdir . "/etc/vmware/init/init.d/47.xcat-networking" ) ;
2010-05-15 01:02:30 +00:00
}
2010-08-19 14:40:41 +00:00
if ( - e "$::XCATROOT/share/xcat/netboot/esxi/xcatsplash" ) {
copy ( "$::XCATROOT/share/xcat/netboot/esxi/xcatsplash" , $ tempdir . "/etc/vmware/welcome" ) ;
}
2010-08-20 15:12:38 +00:00
my $ dossh = 0 ;
2010-08-20 14:48:31 +00:00
if ( - r "/root/.ssh/id_rsa.pub" ) {
2010-08-20 15:12:38 +00:00
$ dossh = 1 ;
2010-08-20 14:48:31 +00:00
my $ umask = umask ( 0077 ) ; #don't remember if dropbear is picky, but just in case
2010-08-20 15:03:29 +00:00
mkpath ( $ tempdir . "/.ssh" ) ;
2010-08-20 14:48:31 +00:00
copy ( "/root/.ssh/id_rsa.pub" , $ tempdir . "/.ssh/authorized_keys" ) ;
2010-08-20 14:54:36 +00:00
umask ( $ umask ) ;
2010-08-20 14:48:31 +00:00
}
2010-08-19 14:40:41 +00:00
my $ tfile ;
mkpath ( $ tempdir . "/var/run/vmware" ) ;
open $ tfile , ">" , $ tempdir . "/var/run/vmware/show-tech-support-login" ;
close ( $ tfile ) ;
2010-05-15 01:02:30 +00:00
#TODO: auto-enable ssh and request boot-time customization rather than on-demand?
2009-06-22 16:00:28 +00:00
require Cwd ;
my $ dir = Cwd:: cwd ( ) ;
chdir ( $ tempdir ) ;
if ( - e "$dest/mod.tgz" ) {
unlink ( "$dest/mod.tgz" ) ;
}
2010-08-20 15:12:38 +00:00
if ( $ dossh ) {
system ( "tar czf $dest/mod.tgz * .ssh" ) ;
} else {
system ( "tar czf $dest/mod.tgz *" ) ;
}
2009-06-22 16:00:28 +00:00
chdir ( $ dir ) ;
rmtree ( $ tempdir ) ;
2010-01-07 20:03:15 +00:00
return 1 ;
2009-06-22 16:00:28 +00:00
}
sub mknetboot {
my $ req = shift ;
my $ doreq = shift ;
my $ tftpdir = "/tftpboot" ;
my @ nodes = @ { $ req - > { node } } ;
my $ ostab = xCAT::Table - > new ( 'nodetype' ) ;
my $ sitetab = xCAT::Table - > new ( 'site' ) ;
my $ bptab = xCAT::Table - > new ( 'bootparams' , - create = > 1 ) ;
my $ installroot = "/install" ;
if ( $ sitetab ) {
( my $ ref ) = $ sitetab - > getAttribs ( { key = > 'installdir' } , 'value' ) ;
if ( $ ref and $ ref - > { value } ) {
$ installroot = $ ref - > { value } ;
}
( $ ref ) = $ sitetab - > getAttribs ( { key = > 'tftpdir' } , 'value' ) ;
if ( $ ref and $ ref - > { value } ) {
$ tftpdir = $ ref - > { value } ;
}
}
my % donetftp = ( ) ;
2009-08-07 17:06:22 +00:00
my $ bpadds = $ bptab - > getNodesAttribs ( \ @ nodes , [ 'addkcmdline' ] ) ;
2009-08-19 14:28:17 +00:00
my % tablecolumnsneededforaddkcmdline ;
my % nodesubdata ;
foreach my $ key ( keys %$ bpadds ) { #First, we identify all needed table.columns needed to aggregate database call
my $ add = $ bpadds - > { $ key } - > [ 0 ] - > { addkcmdline } ;
2009-11-19 22:46:26 +00:00
next if ! defined $ add ;
2009-08-19 14:28:17 +00:00
while ( $ add =~ /#NODEATTRIB:([^:#]+):([^:#]+)#/ ) {
push @ { $ tablecolumnsneededforaddkcmdline { $ 1 } } , $ 2 ;
$ add =~ s/#NODEATTRIB:([^:#]+):([^:#]+)#// ;
}
}
foreach my $ table ( keys % tablecolumnsneededforaddkcmdline ) {
my $ tab = xCAT::Table - > new ( $ table , - create = > 0 ) ;
if ( $ tab ) {
$ nodesubdata { $ table } = $ tab - > getNodesAttribs ( \ @ nodes , $ tablecolumnsneededforaddkcmdline { $ table } ) ;
}
}
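#Illustrative (hypothetical) bootparams.addkcmdline value consumed per node below:
#  "vlanid=#NODEATTRIB:vm:nics# --- xcat/extra.tgz"
#Everything before "---" is appended to the kernel command line, with each
##NODEATTRIB:table:column# token replaced by that node's attribute value, and
#everything after "---" is appended as additional boot modules.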
2009-06-22 16:00:28 +00:00
foreach my $ node ( @ nodes ) {
my $ ent = $ ostab - > getNodeAttribs ( $ node , [ 'os' , 'arch' , 'profile' ] ) ;
my $ arch = $ ent - > { 'arch' } ;
my $ profile = $ ent - > { 'profile' } ;
my $ osver = $ ent - > { 'os' } ;
2009-09-28 21:26:01 +00:00
#if($arch ne 'x86'){
2010-08-06 15:29:07 +00:00
# xCAT::SvrUtils::sendmsg([1,"VMware ESX hypervisors are x86, please change the nodetype.arch value to x86 instead of $arch for $node before proceeding:
2009-09-28 21:26:01 +00:00
#e.g: nodech $node nodetype.arch=x86\n"]);
# return;
#}
2009-06-22 16:00:28 +00:00
# first make sure copycds was done:
2009-09-28 21:26:01 +00:00
my $ custprofpath = $ profile ;
unless ( $ custprofpath =~ /^\// ) { #If profile begins with a /, assume it already is a path
$ custprofpath = $ installroot . "/custom/install/esxi/$arch/$profile" ;
}
2009-06-22 16:00:28 +00:00
unless (
2009-09-28 21:26:01 +00:00
- r "$custprofpath/vmkboot.gz"
or - r "$installroot/$osver/$arch/mboot.c32"
2009-06-22 16:00:28 +00:00
or - r "$installroot/$osver/$arch/install.tgz" ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Please run copycds first for $osver or create custom image in $custprofpath/" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
}
mkpath ( "$tftpdir/xcat/netboot/$osver/$arch/" ) ;
2010-06-15 20:45:32 +00:00
my @ reqmods = qw/vmkboot.gz vmk.gz sys.vgz cim.vgz/ ; #Required modules for an image to be considered complete
2009-09-28 21:26:01 +00:00
my % mods ;
foreach ( @ reqmods ) {
$ mods { $ _ } = 1 ;
}
2009-10-02 15:27:59 +00:00
my $ shortprofname = $ profile ;
$ shortprofname =~ s/\/\z// ;
$ shortprofname =~ s/.*\/// ;
2009-06-22 16:00:28 +00:00
unless ( $ donetftp { $ osver , $ arch } ) {
my $ srcdir = "$installroot/$osver/$arch" ;
2009-10-02 15:20:52 +00:00
my $ dest = "$tftpdir/xcat/netboot/$osver/$arch/$shortprofname" ;
2009-09-28 21:26:01 +00:00
cpNetbootImages ( $ osver , $ srcdir , $ dest , $ custprofpath , \ % mods ) ;
2010-01-07 20:03:15 +00:00
if ( makecustomizedmod ( $ osver , $ dest ) ) {
push @ reqmods , "mod.tgz" ;
$ mods { "mod.tgz" } = 1 ;
}
2010-05-15 01:34:54 +00:00
if ( - r "$::XCATROOT/share/xcat/netboot/syslinux/mboot.c32" ) { #prefer xCAT patched mboot.c32 with BOOTIF for mboot
2010-05-15 01:44:30 +00:00
copy ( "$::XCATROOT/share/xcat/netboot/syslinux/mboot.c32" , $ dest ) ;
2010-05-15 01:24:56 +00:00
} else {
copy ( "$srcdir/mboot.c32" , $ dest ) ;
}
2009-06-22 16:00:28 +00:00
$ donetftp { $ osver , $ arch , $ profile } = 1 ;
}
2009-10-02 15:20:52 +00:00
my $ tp = "xcat/netboot/$osver/$arch/$shortprofname" ;
2009-09-28 21:26:01 +00:00
my $ bail = 0 ;
foreach ( @ reqmods ) {
unless ( - r "$tftpdir/$tp/$_" ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "$_ is missing from the target destination, ensure that either copycds has been run or that $custprofpath contains this file" ] , $ output_handler ) ;
2009-09-28 21:26:01 +00:00
$ bail = 1 ; #only flag to bail, present as many messages as possible to user
}
}
if ( $ bail ) { #if the above loop detected one or more failures, bail out
return ;
}
# now build the kernel and kcmdline values that make up the node's <HEX> boot file entry
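#A minimal sketch of the resulting bootparams values (illustrative paths only):
#  kernel:   xcat/netboot/esxi4.1/x86_64/compute/mboot.c32
#  kcmdline: <tp>/vmkboot.gz --- <tp>/vmk.gz --- <tp>/sys.vgz --- <tp>/cim.vgz [--- <tp>/mod.tgz ...]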
2009-06-22 16:00:28 +00:00
my $ kernel = "$tp/mboot.c32" ;
2009-08-18 18:59:20 +00:00
my $ prepend = "$tp/vmkboot.gz" ;
2009-09-28 21:26:01 +00:00
delete $ mods { "vmkboot.gz" } ;
2009-08-18 18:59:20 +00:00
my $ append = " --- $tp/vmk.gz" ;
2009-09-28 21:26:01 +00:00
delete $ mods { "vmk.gz" } ;
2009-06-22 16:00:28 +00:00
$ append . = " --- $tp/sys.vgz" ;
2009-09-28 21:26:01 +00:00
delete $ mods { "sys.vgz" } ;
2010-05-17 14:58:30 +00:00
$ append . = " --- $tp/cim.vgz" ;
delete $ mods { "cim.vgz" } ;
2010-01-07 20:03:15 +00:00
if ( $ mods { "mod.tgz" } ) {
$ append . = " --- $tp/mod.tgz" ;
delete $ mods { "mod.tgz" } ;
}
2009-09-28 21:26:01 +00:00
foreach ( keys % mods ) {
$ append . = " --- $tp/$_" ;
}
2009-08-07 17:06:22 +00:00
if ( defined $ bpadds - > { $ node } - > [ 0 ] - > { addkcmdline } ) {
2009-08-18 18:59:20 +00:00
my $ modules ;
my $ kcmdline ;
( $ kcmdline , $ modules ) = split /---/ , $ bpadds - > { $ node } - > [ 0 ] - > { addkcmdline } , 2 ;
2009-08-19 14:28:17 +00:00
$ kcmdline =~ s/#NODEATTRIB:([^:#]+):([^:#]+)#/$nodesubdata{$1}->{$node}->[0]->{$2}/eg ;
2009-08-18 18:59:20 +00:00
if ( $ modules ) {
$ append . = " --- " . $ modules ;
}
$ prepend . = " " . $ kcmdline ;
2009-08-07 17:06:22 +00:00
}
2009-08-18 18:59:20 +00:00
$ append = $ prepend . $ append ;
$ output_handler - > ( { node = > [ { name = > [ $ node ] , '_addkcmdlinehandled' = > [ 1 ] } ] } ) ;
2009-08-07 17:06:22 +00:00
2009-06-22 16:00:28 +00:00
$ bptab - > setNodeAttribs (
$ node ,
{
kernel = > $ kernel ,
initrd = > "" ,
kcmdline = > $ append
}
) ;
} # end of node loop
}
# this is where we extract the netboot images out of the copied ISO image
sub cpNetbootImages {
my $ osver = shift ;
my $ srcDir = shift ;
my $ destDir = shift ;
2009-09-28 21:26:01 +00:00
my $ overridedir = shift ;
my $ modulestoadd = shift ;
2009-06-22 16:00:28 +00:00
my $ tmpDir = "/tmp/xcat.$$" ;
if ( $ osver =~ /esxi4/ ) {
# we don't want to go through this all the time, so if it's already
# there we're not going to extract:
2009-09-28 21:26:01 +00:00
unless ( - r "$destDir/vmk.gz"
2009-06-22 16:00:28 +00:00
and - r "$destDir/vmkboot.gz"
and - r "$destDir/sys.vgz"
2010-05-17 14:58:30 +00:00
and - r "$destDir/cim.vgz"
and - r "$destDir/cimstg.tgz"
2009-06-22 16:00:28 +00:00
) {
2009-09-28 21:26:01 +00:00
if ( - r "$srcDir/image.tgz" ) { #it still may work without image.tgz if profile customization has everything replaced
mkdir ( $ tmpDir ) ;
chdir ( $ tmpDir ) ;
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( "extracting netboot files from OS image. This may take about a minute or two...hopefully you have ~1GB free in your /tmp dir\n" , $ output_handler ) ;
2009-09-28 21:26:01 +00:00
my $ cmd = "tar zxvf $srcDir/image.tgz" ;
print "\n$cmd\n" ;
if ( system ( "tar zxf $srcDir/image.tgz" ) ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to extract $srcDir/image.tgz\n" ] , $ output_handler ) ;
2009-09-28 21:26:01 +00:00
}
# this has the big image and may take a while.
# this should now create:
# /tmp/xcat.1234/usr/lib/vmware/installer/VMware-VMvisor-big-164009-x86_64.dd.bz2 or some other version. We need to extract partition 5 from it.
system ( "bunzip2 $tmpDir/usr/lib/vmware/installer/*bz2" ) ;
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( "finished extracting, now copying files...\n" , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
2009-09-28 21:26:01 +00:00
# now we need to get partition 5 which has the installation goods in it.
my $ scmd = "fdisk -lu $tmpDir/usr/lib/vmware/installer/*dd 2>&1 | grep dd5 | awk '{print \$2}'" ;
print "running: $scmd\n" ;
my $ sector = `$scmd` ;
chomp ( $ sector ) ;
my $ offset = $ sector * 512 ;
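# e.g. (hypothetical numbers) if fdisk reports partition 5 starting at sector 8224,
# the loop mount below would use a byte offset of 8224 * 512 = 4210688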
mkdir "/mnt/xcat" ;
my $ mntcmd = "mount $tmpDir/usr/lib/vmware/installer/*dd /mnt/xcat -o loop,offset=$offset" ;
print "$mntcmd\n" ;
if ( system ( $ mntcmd ) ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "unable to mount partition 5 of the ESX netboot image to /mnt/xcat" ] , $ output_handler ) ;
2009-09-28 21:26:01 +00:00
return ;
}
2009-11-19 22:46:26 +00:00
if ( ! - d $ destDir ) {
mkpath ( $ destDir ) ;
}
2009-09-28 21:26:01 +00:00
if ( system ( "cp /mnt/xcat/* $destDir/" ) ) {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Could not copy netboot contents to $destDir" ] , $ output_handler ) ;
2009-09-28 21:26:01 +00:00
system ( "umount /mnt/xcat" ) ;
return ;
}
chdir ( "/tmp" ) ;
system ( "umount /mnt/xcat" ) ;
print "tempDir: $tmpDir\n" ;
system ( "rm -rf $tmpDir" ) ;
2010-06-15 20:45:32 +00:00
} elsif ( - r "$srcDir/cim.vgz" and - r "$srcDir/vmkernel.gz" and - r "$srcDir/vmkboot.gz" and - r "$srcDir/sys.vgz" ) {
use File::Basename ;
if ( ! - d $ destDir ) {
mkpath ( $ destDir ) ;
}
#In ESXi 4.1 the above breaks; this seems to work and is much simpler too
foreach ( "$srcDir/cim.vgz" , "$srcDir/vmkernel.gz" , "$srcDir/vmkboot.gz" , "$srcDir/sys.vgz" , "$srcDir/sys.vgz" ) {
my $ mod = scalar fileparse ( $ _ ) ;
if ( $ mod =~ /vmkernel.gz/ ) {
2010-08-06 15:29:07 +00:00
copy ( $ _ , "$destDir/vmk.gz" ) or xCAT::SvrUtils:: sendmsg ( [ 1 , "Could not copy netboot contents from $_ to $destDir/$mod" ] , $ output_handler ) ;
2010-06-15 20:45:32 +00:00
} else {
2010-08-06 15:29:07 +00:00
copy ( $ _ , "$destDir/$mod" ) or xCAT::SvrUtils:: sendmsg ( [ 1 , "Could not copy netboot contents from $_ to $destDir/$mod" ] , $ output_handler ) ;
2010-06-15 20:45:32 +00:00
}
}
2009-09-28 21:26:01 +00:00
}
}
if ( - d $ overridedir ) { #Copy over all modules
use File::Basename ;
foreach ( glob "$overridedir/*" ) {
my $ mod = scalar fileparse ( $ _ ) ;
2010-06-15 20:45:32 +00:00
if ( $ mod =~ /gz\z/ and $ mod !~ /pkgdb.tgz/ and $ mod !~ /vmkernel.gz/ ) {
2009-09-28 21:26:01 +00:00
$ modulestoadd - > { $ mod } = 1 ;
2010-08-06 15:29:07 +00:00
copy ( $ _ , "$destDir/$mod" ) or xCAT::SvrUtils:: sendmsg ( [ 1 , "Could not copy netboot contents from $overridedir to $destDir" ] , $ output_handler ) ;
2010-06-15 20:45:32 +00:00
} elsif ( $ mod =~ /vmkernel.gz/ ) {
$ modulestoadd - > { "vmk.gz" } = 1 ;
2010-08-06 15:29:07 +00:00
copy ( $ _ , "$destDir/vmk.gz" ) or xCAT::SvrUtils:: sendmsg ( [ 1 , "Could not copy netboot contents from $overridedir to $destDir" ] , $ output_handler ) ;
2009-09-28 21:26:01 +00:00
}
}
}
2009-06-22 16:00:28 +00:00
} else {
2010-08-06 15:29:07 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "VMware $osver is not supported for netboot" ] , $ output_handler ) ;
2009-06-22 16:00:28 +00:00
}
}
2009-11-23 23:11:16 +00:00
# compares nfs target described by parameters to every share mounted by target hypervisor
# returns 1 if matching datastore is present and 0 otherwise
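# A minimal usage sketch (hypothetical values):
#   match_nfs_datastore('172.16.0.1', '/export/vms', $hostview->{vim})
# returns 1 only if a mounted NFS volume reports that remoteHost and remotePath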
sub match_nfs_datastore {
my ( $ host , $ path , $ hypconn ) = @ _ ;
die "esx plugin bug: no host provided for match_datastore" unless defined $ host ;
die "esx plugin bug: no path provided for match_datastore" unless defined $ path ;
my @ ip ;
eval {
if ( $ host =~ m/\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b/ ) {
use Socket ;
@ ip = ( $ host ) ;
$ host = gethostbyaddr ( inet_aton ( $ host ) , AF_INET ) ;
} else {
use Socket ;
( undef , undef , undef , undef , @ ip ) = gethostbyname ( $ host ) ;
my @ ip_ntoa = ( ) ;
foreach ( @ ip ) {
push ( @ ip_ntoa , inet_ntoa ( $ _ ) ) ;
}
@ ip = @ ip_ntoa ;
}
} ;
if ( $@ ) {
die "error while resolving datastore host: $@\n" ;
}
my % viewcrit = (
view_type = > 'HostSystem' ,
properties = > [ 'config.fileSystemVolume' ] ,
) ;
my $ dsviews = $ hypconn - > find_entity_views ( % viewcrit ) ;
foreach ( @$ dsviews ) {
foreach my $ mount ( @ { $ _ - > get_property ( 'config.fileSystemVolume.mountInfo' ) } ) {
next unless $ mount - > { 'volume' } { 'type' } eq 'NFS' ;
my $ hostMatch = 0 ;
HOSTMATCH: foreach ( @ ip , $ host ) {
next HOSTMATCH unless $ mount - > { 'volume' } { 'remoteHost' } eq $ _ ;
$ hostMatch = 1 ;
last HOSTMATCH ;
}
next unless $ hostMatch ;
next unless $ mount - > { 'volume' } { 'remotePath' } eq $ path ;
return 1 ;
}
}
return 0 ;
}
2009-06-22 16:00:28 +00:00
1 ;
2009-07-13 18:04:39 +00:00
# vi: set ts=4 sw=4 filetype=perl: