#!/usr/bin/env perl
# IBM(c) 2007 EPL license http://www.eclipse.org/legal/epl-v10.html
package xCAT_plugin::kvm;
BEGIN
{
    $::XCATROOT = $ENV{'XCATROOT'} ? $ENV{'XCATROOT'} : '/opt/xcat';
}
use lib "$::XCATROOT/lib/perl";
use xCAT::GlobalDef;
use xCAT::NodeRange;
use xCAT::VMCommon;
use xCAT_monitoring::monitorctrl;
use xCAT::Table;
use XML::LibXML; #now that we are in the business of modifying xml data, need something capable of preserving more of the XML structure
#TODO: convert all uses of XML::Simple to LibXML? Using both seems wasteful in a way..
use XML::Simple qw(XMLout);
use Thread qw(yield);
use xCAT::Utils qw/genpassword/;
use File::Basename qw/fileparse/;
use File::Path qw/mkpath/;
use IO::Socket;
use IO::Select;
use strict;
#use warnings;
my $use_xhrm = 0; #xCAT Hypervisor Resource Manager, to satisfy networking and storage prerequisites, default to not using it for the moment
my $imgfmt = 'raw'; #use raw format by default
my $clonemethod = 'qemu-img'; #use qemu-img command
my %vm_comm_pids;
my %offlinehyps;
my %hypstats;
my %offlinevms;
my $parser;
my @destblacklist;
my $updatetable; #when a function is performing per-node operations, it can queue up a table update by populating parts of this hash
my $confdata; #a reference to serve as a common pointer between VMCommon functions and this plugin
my $libvirtsupport;
$libvirtsupport = eval {
    require Sys::Virt;
    if (Sys::Virt->VERSION < "0.2.0") {
        die;
    }
    1;
};
use XML::Simple;
$XML::Simple::PREFERRED_PARSER = 'XML::Parser';
use Data::Dumper;
use POSIX "WNOHANG";
use Storable qw(freeze thaw);
use IO::Select;
use IO::Handle;
use Time::HiRes qw(gettimeofday sleep);
use xCAT::DBobjUtils;
use Getopt::Long;
use xCAT::SvrUtils;
my %runningstates;
my $vmmaxp = 64;
my $mactab;
my %usedmacs;
my $status_noop = "XXXno-opXXX";
my $callback;
my $requester; #used to track the user
sub handled_commands {
    #unless ($libvirtsupport) {
    #    return {};
    #}
    return {
        rpower => 'nodehm:power,mgt',
        mkvm => 'nodehm:power,mgt',
        clonevm => 'nodehm:power,mgt',
        chvm => 'nodehm:power,mgt',
        rmvm => 'nodehm:power,mgt',
        rinv => 'nodehm:power,mgt',
        rmigrate => 'nodehm:mgt',
        getcons => 'nodehm:mgt',
        #rvitals => 'nodehm:mgt',
        #rinv => 'nodehm:mgt',
        getrvidparms => 'nodehm:mgt',
        lsvm => 'hypervisor:type',
        rbeacon => 'nodehm:mgt',
        revacuate => 'hypervisor:type',
        vmstatenotify => 'hypervisor:type',
        #rspreset => 'nodehm:mgt',
        #rspconfig => 'nodehm:mgt',
        #rbootseq => 'nodehm:mgt',
        #reventlog => 'nodehm:mgt',
    };
}
my $hypconn;
my $hyp;
my $doreq;
my %hyphash;
my $node;
my $vmtab;
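#get_path_for_pool: given a Sys::Virt storage pool object, parse its XML
#description and return the single /pool/target/path value, or undef if the
#pool does not have exactly one target path.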
sub get_path_for_pool {
    my $poolobj = shift;
    my $poolxml = $poolobj->get_xml_description();
    my $pooldom = $parser->parse_string($poolxml);
    my @paths = $pooldom->findnodes("/pool/target/path/text()");
    if (scalar @paths != 1) {
        return undef;
    }
    return $paths[0]->data;
}
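#build_pool_xml: translate an xCAT storage URL (dir:// or nfs://) into libvirt
#storage pool XML.  dir:// URLs become a 'dir' pool; nfs:// URLs become a
#'netfs' pool targeted at /var/lib/xcat/pools/<uuid> on the hypervisor.
#For illustration (hostname and generated uuid are hypothetical), an input of
#nfs://nfsserver/install/vms would yield roughly:
#  <pool type="netfs"><name>nfs_nfsserver_install_vms</name><uuid>...</uuid>
#    <source><host name="nfsserver"/><dir path="/install/vms"/></source>
#    <target><path>/var/lib/xcat/pools/...</path></target></pool>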
sub build_pool_xml {
    my $url = shift;
    my $pooldesc;
    my $name = $url;
    $name =~ s!nfs://!nfs_!;
    $name =~ s!dir://!dir_!;
    $name =~ s/\//_/g; #though libvirt considers / kosher, sometimes it wants to create a local xml file using name for filename...
    if ($url =~ /^dir:/) { #directory style..
        my $path = $url;
        $path =~ s/dir:\/\///g;
        return "<pool type=\"dir\"><name>$name</name><target><path>$path</path></target></pool>";
    }
    my $mounthost = shift;
    unless ($mounthost) { $mounthost = $hyp; }
    my $pool;
    my $host = $url;
    $host =~ s/.*:\/\///;
    $host =~ s/(\/.*)//;
    my $srcpath = $1;
    my $uuid = xCAT::Utils::genUUID(url => $url);
    #first, we make a pool desc that won't have slashes in them
    $pooldesc = '<pool type="netfs">';
    $pooldesc .= '<name>' . $name . '</name>';
    $pooldesc .= '<uuid>' . $uuid . '</uuid>';
    $pooldesc .= '<source>';
    $pooldesc .= '<host name="' . $host . '"/>';
    $pooldesc .= '<dir path="' . $srcpath . '"/>';
    $pooldesc .= '</source>';
    $pooldesc .= '<target><path>/var/lib/xcat/pools/' . $uuid . '</path></target></pool>';
    #turns out we can 'define', then 'build', then 'create' on the poolobj instead of 'create', to get mkdir -p like function
    #system("ssh $mounthost mkdir -p /var/lib/xcat/pools/$uuid"); #ok, so not *technically* just building XML, but here is the cheapest
    #place to know uuid... And yes, we must be allowed to ssh in
    #libvirt just isn't capable enough for this sort of usage
    return $pooldesc;
}
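#get_storage_pool_by_url: find an existing libvirt storage pool matching the
#given xCAT storage URL (comparing host and path for nfs://, target path for
#dir://, or pool name otherwise).  If none exists, define, build, and create
#one from build_pool_xml.  Returns the Sys::Virt::StoragePool object.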
sub get_storage_pool_by_url {
    my $url = shift;
    my $virtconn = shift;
    my $mounthost = shift;
    unless ($virtconn) { $virtconn = $hypconn; }
    my @currpools = $virtconn->list_storage_pools();
    push @currpools, $virtconn->list_defined_storage_pools();
    my $poolobj;
    my $pool;
    foreach my $poolo (@currpools) {
        $poolobj = $poolo;
        $pool = $parser->parse_string($poolobj->get_xml_description()); #XMLin($poolobj->get_xml_description());
        if ($url =~ /^nfs:\/\/([^\/]*)(\/.*)$/) { #check the essence of the pool rather than the name
            my $host = $1;
            my $path = $2;
            unless ($pool->findnodes("/pool")->[0]->getAttribute("type") eq "netfs") {
                $pool = undef;
                next;
            }
            #ok, it is netfs, now check source..
            my $checkhost = $pool->findnodes("/pool/source/host")->[0]->getAttribute("name");
            my $checkpath = $pool->findnodes("/pool/source/dir")->[0]->getAttribute("path");
            if ($checkhost eq $host and $checkpath eq $path) { #TODO: check name resolution to see if they match really even if not strictly the same
                last;
            }
        } elsif ($url =~ /^dir:\/\/(.*)\z/) { #a directory, simple enough
            my $path = $1;
            unless ($path =~ /^\//) {
                $path = '/' . $path;
            }
            my $checkpath = $pool->findnodes("/pool/target/path/text()")->[0]->data;
            if ($checkpath eq $path) {
                last;
            }
        } elsif ($pool->findnodes('/pool/name/text()')->[0]->data eq $url) { #$pool->{name} eq $url) {
            last;
        }
        $pool = undef;
    }
    if ($pool) {
        my $inf = $poolobj->get_info();
        if ($inf->{state} == 0) { #Sys::Virt::StoragePool::STATE_INACTIVE) { #if pool is currently inactive, bring it up
            $poolobj->build();
            $poolobj->create();
        }
        eval { #we *try* to do this, but various things may interfere.
            #this is basically to make sure the list of contents is up to date
            $poolobj->refresh();
        };
        return $poolobj;
    }
    $poolobj = $virtconn->define_storage_pool(build_pool_xml($url, $mounthost));
    $poolobj->build();
    $poolobj->create();
    eval { #wrap in eval, not likely to fail here, but calling it at all may be superfluous anyway
        $poolobj->refresh();
    };
    return $poolobj;
}
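#get_multiple_paths_by_url: enumerate the volumes named for the node in the
#pool backing the given URL and return a hashref keyed by volume path, with
#the device name and image format parsed from the volume name
#(e.g. node.sda.qcow2, or node.sda which is treated as raw).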
sub get_multiple_paths_by_url {
    my %args = @_;
    my $url = $args{url};
    my $node = $args{node};
    my $poolobj = get_storage_pool_by_url($url);
    unless ($poolobj) { die "Could not get storage pool for $url"; }
    eval { #refresh() can 'die' if cloning in progress, accept stale data then
        $poolobj->refresh(); #if volumes change on nfs storage, libvirt is too dumb to notice
    };
    my @volobjs = $poolobj->list_volumes();
    my %paths;
    foreach (@volobjs) {
        if ($_->get_name() =~ /^$node\.([^\.]*)\.([^\.]*)$/) {
            $paths{$_->get_path()} = { device => $1, format => $2 };
        } elsif ($_->get_name() =~ /^$node\.([^\.]*)$/) {
            $paths{$_->get_path()} = { device => $1, format => 'raw' };
            #this requires any current user of qcow2 to migrate, unfortunate, to escape
            #a vulnerability where a raw user could write a malicious qcow2 header
            #and use that to get at files on the hypervisor os with escalated privilege
        }
    }
    return \%paths;
}
sub get_filepath_by_url { #at the end of the day, the libvirt storage api gives the following capability:
    #mount, limited ls, and qemu-img
    #it does not frontend mkdir, and does not abstract away any nitty-gritty detail, you must know:
    #the real mountpoint, and the real full path to storage
    #in addition to this, subdirectories are not allowed, and certain extra metadata must be created
    #not a big fan compared to ssh and run the commands myself, but it's the most straightforward path
    #to avoid ssh for users who dislike that style access
    my %args = @_;
    my $url = $args{url};
    my $dev = $args{dev};
    my $create = $args{create};
    my $force = $args{force};
    my $format = $args{format};
    unless ($format) {
        $format = 'qcow2';
    }
    #print "url=$url, dev=$dev,create=$create, force=$force, format=$format\n";
    #ok, now that we have the pool, we need the storage volume from the pool for the node/dev
    my $poolobj = get_storage_pool_by_url($url);
    unless ($poolobj) { die "Could not get storage pool for $url"; }
    eval { #make a refresh attempt non-fatal to fail, since cloning can block it
        $poolobj->refresh(); #if volumes change on nfs storage, libvirt is too dumb to notice
    };
    my @volobjs = $poolobj->list_volumes();
    my $desiredname = $node . '.' . $dev . '.' . $format;
    #print "desiredname=$desiredname, volobjs=@volobjs\n";
    foreach (@volobjs) {
        if ($_->get_name() eq $desiredname) {
            if ($create) {
                if ($force) { #must destroy the storage
                    $_->delete();
                } else {
                    die "Path already exists";
                }
            } else {
                return $_->get_path();
            }
        }
    }
    if ($create) {
        if ($create =~ /^clone=(.*)$/) {
            my $src = $1;
            my $fmt = 'raw';
            if ($src =~ /\.qcow2$/) {
                $fmt = 'qcow2';
            }
            my $vol = $poolobj->create_volume("<volume><name>" . $desiredname . "</name><target><format type='$format'/></target><capacity>100</capacity><backingStore><path>$src</path><format type='$fmt'/></backingStore></volume>");
            #ok, this is simply hinting, not the real deal, so to speak
            # 1) sys::virt complains if capacity isn't defined.  We say '100', knowing full well it will be promptly ignored down the code.  This is aggravating
            #    and warrants recheck with the RHEL6 stack
            # 2) create_volume with backingStore is how we do the clone from master (i.e. a thin clone, a la qemu-img create)
            #    note how backing store is full path, allowing cross-pool clones
            # 3) clone_volume is the way to invoke qemu-img convert (i.e. to 'promote' and flatten a vm image to a standalone duplicate volume
            #    incidentally, promote to master will be relatively expensive compared to the converse operation, as expected
            #    will have to verify as it is investigated whether this can successfully cross pools (hope so)
            # 4) qemu-img was so much more transparent and easy to figure out than this
            # additionally, when mastering a powered down node, we should rebase the node to be a cow clone of the master it just spawned
        } else {
            my $vol = $poolobj->create_volume("<volume><name>" . $desiredname . "</name><target><format type='$format'/></target><capacity>" . getUnits($create, "G", 1) . "</capacity><allocation>0</allocation></volume>");
            if ($vol) { return $vol->get_path(); }
        }
    } else {
        return undef;
    }
}
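#nodesockopen: quick reachability probe; returns 1 if a TCP connection to the
#given node/port succeeds, 0 otherwise.  Used here to check whether a
#hypervisor is answering on ssh (port 22) before attempting libvirt over ssh.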
sub nodesockopen {
    my $node = shift;
    my $port = shift;
    unless ($node) { return 0; }
    my $socket;
    my $addr = gethostbyname($node);
    my $sin = sockaddr_in($port, $addr);
    my $proto = getprotobyname('tcp');
    socket($socket, PF_INET, SOCK_STREAM, $proto) || return 0;
    connect($socket, $sin) || return 0;
    return 1;
}
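#waitforack: wait (up to 60 seconds) for a single acknowledgement line from
#the parent process over the supplied socket before proceeding.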
sub waitforack {
    my $sock = shift;
    my $select = new IO::Select;
    $select->add($sock);
    my $str;
    if ($select->can_read(60)) { # Continue after 60 seconds, even if not acked...
        if ($str = <$sock>) {
        } else {
            $select->remove($sock); #Block until parent acks data
        }
    }
}
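#reconfigvm: reconcile a guest's saved domain XML with the current vm table
#settings (clock offset, memory, vcpus, boot order).  Returns the updated XML
#string if anything had to change, or 0 if the definition is already correct.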
sub reconfigvm {
    $node = shift;
    my $xml = shift;
    my $domdesc = $parser->parse_string($xml);
    my @bootdevs = $domdesc->findnodes("/domain/os/boot");
    my $curroffset = $domdesc->findnodes("/domain/clock")->[0]->getAttribute("offset");
    my $newoffset;
    my $needfixin = 0;
    if (defined($confdata->{vm}->{$node}->[0]->{clockoffset})) {
        #If user requested a specific behavior, give it
        $newoffset = $confdata->{vm}->{$node}->[0]->{clockoffset};
    } else {
        #Otherwise, only do local time for things that look MS
        if (defined($confdata->{nodetype}->{$node}->[0]->{os}) and $confdata->{nodetype}->{$node}->[0]->{os} =~ /win.*/) {
            $newoffset = 'localtime';
        } else { #For everyone else, utc is preferred generally
            $newoffset = 'utc';
        }
    }
    if ($curroffset ne $newoffset) {
        $needfixin = 1;
        $domdesc->findnodes("/domain/clock")->[0]->setAttribute("offset", $newoffset);
    }
    my @oldbootdevs;
    if (defined $confdata->{vm}->{$node}->[0]->{memory}) {
        $needfixin = 1;
        $domdesc->findnodes("/domain/memory/text()")->[0]->setData(getUnits($confdata->{vm}->{$node}->[0]->{memory}, "M", 1024));
        foreach ($domdesc->findnodes("/domain/currentMemory/text()")) {
            $_->setData(getUnits($confdata->{vm}->{$node}->[0]->{memory}, "M", 1024));
        }
    }
    if (defined $confdata->{vm}->{$node}->[0]->{cpus}) {
        $needfixin = 1;
        $domdesc->findnodes("/domain/vcpu/text()")->[0]->setData($confdata->{vm}->{$node}->[0]->{cpus});
    }
    if (defined $confdata->{vm}->{$node}->[0]->{bootorder}) {
        my @expectedorder = split(/[:,]/, $confdata->{vm}->{$node}->[0]->{bootorder});
        foreach (@expectedorder) { #this loop will check for changes and fix 'n' and 'net'
            my $currdev = shift @bootdevs;
            if ("net" eq $_ or "n" eq $_) {
                $_ = "network";
            }
            unless ($currdev and $currdev->getAttribute("dev") eq $_) {
                $needfixin = 1;
            }
            if ($currdev) {
                push @oldbootdevs, $currdev;
            }
        }
        if (scalar(@bootdevs)) {
            $needfixin = 1;
            push @oldbootdevs, @bootdevs;
        }
        unless ($needfixin) { return 0; }
        #ok, we need to remove all 'boot' nodes from current xml, and put in new ones in the order we like
        foreach (@oldbootdevs) {
            $_->parentNode->removeChild($_);
        }
        #now to add what we want...
        my $osnode = $domdesc->findnodes("/domain/os")->[0];
        foreach (@expectedorder) {
            my $fragment = $parser->parse_balanced_chunk('<boot dev="' . $_ . '"/>');
            $osnode->appendChild($fragment);
        }
    }
    if ($needfixin) {
        return $domdesc->toString();
    } else { return 0; }
}
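#build_oshash: build the <os> section for XML::Simple, honoring vm.bootorder
#when set and otherwise defaulting to network boot followed by hd.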
sub build_oshash {
    my %rethash;
    $rethash{type}->{content} = 'hvm';
    if (defined $confdata->{vm}->{$node}->[0]->{bootorder}) {
        my $bootorder = $confdata->{vm}->{$node}->[0]->{bootorder};
        my @bootdevs = split(/[:,]/, $bootorder);
        my $bootnum = 0;
        foreach (@bootdevs) {
            if ("net" eq $_ or "n" eq $_) {
                $rethash{boot}->[$bootnum]->{dev} = "network";
            } else {
                $rethash{boot}->[$bootnum]->{dev} = $_;
            }
            $bootnum++;
        }
    } else {
        $rethash{boot}->[0]->{dev} = 'network';
        $rethash{boot}->[1]->{dev} = 'hd';
    }
    return \%rethash;
}
sub build_diskstruct {
    print "build_diskstruct called\n";
    my $cdloc = shift;
    my @returns = ();
    my $currdev;
    my @suffixes = ('a', 'b', 'd' .. 'zzz');
    my $suffidx = 0;
    if ($cdloc) {
        my $cdhash;
        $cdhash->{device} = 'cdrom';
        if ($cdloc =~ /^\/dev/) {
            $cdhash->{type} = 'block';
        } else {
            $cdhash->{type} = 'file';
        }
        $cdhash->{source}->{file} = $cdloc;
        $cdhash->{readonly};
        $cdhash->{target}->{dev} = 'hdc';
        push @returns, $cdhash;
    } else { #give the VM an empty optical drive, to allow chvm live attach/remove
        my $cdhash;
        $cdhash->{device} = 'cdrom';
        $cdhash->{type} = 'file';
        $cdhash->{readonly};
        $cdhash->{target}->{dev} = 'hdc';
        push @returns, $cdhash;
    }
    if (defined $confdata->{vm}->{$node}->[0]->{storage}) {
        my $disklocs = $confdata->{vm}->{$node}->[0]->{storage};
        my @locations = split /\|/, $disklocs;
        foreach my $disk (@locations) {
            #Setting default values of a virtual disk backed by a file at hd*.
            my $diskhash;
            $disk =~ s/=(.*)//;
            my $model = $1;
            unless ($model) {
                #if not defined, model will stay undefined like above
                $model = $confdata->{vm}->{$node}->[0]->{storagemodel};
                unless ($model) { $model = 'ide'; } #if still not defined, ide
            }
            my $prefix = 'hd';
            if ($model eq 'virtio') {
                $prefix = 'vd';
            } elsif ($model eq 'scsi') {
                $prefix = 'sd';
            }
            $diskhash->{type} = 'file';
            $diskhash->{device} = 'disk';
            $diskhash->{target}->{dev} = $prefix . $suffixes[$suffidx];
            $diskhash->{target}->{bus} = $model;
            my @disk_parts = split(/,/, $disk);
            #Find host file and determine if it is a file or a block device.
            if (substr($disk_parts[0], 0, 4) eq 'phy:') {
                $diskhash->{type} = 'block';
                $diskhash->{source}->{dev} = substr($disk_parts[0], 4);
            } elsif ($disk_parts[0] =~ m/^nfs:\/\/(.*)$/ or $disk_parts[0] =~ m/^dir:\/\/(.*)$/) {
                my %disks = %{ get_multiple_paths_by_url(url => $disk_parts[0], node => $node) };
                unless (keys %disks) {
                    die "Unable to find any persistent disks at " . $disk_parts[0];
                }
                foreach (keys %disks) {
                    my $tdiskhash;
                    $tdiskhash->{type};
                    $tdiskhash->{device} = 'disk';
                    $tdiskhash->{driver}->{name} = 'qemu';
                    $tdiskhash->{driver}->{type} = $disks{$_}->{format};
                    $tdiskhash->{source}->{file} = $_;
                    $tdiskhash->{target}->{dev} = $disks{$_}->{device};
                    if ($disks{$_}->{device} =~ /^vd/) {
                        $tdiskhash->{target}->{bus} = 'virtio';
                    } elsif ($disks{$_}->{device} =~ /^hd/) {
                        $tdiskhash->{target}->{bus} = 'ide';
                    } elsif ($disks{$_}->{device} =~ /^sd/) {
                        $tdiskhash->{target}->{bus} = 'scsi';
                    }
                    push @returns, $tdiskhash;
                }
                next; #nfs:// skips the other stuff
                #$diskhash->{source}->{file} = get_filepath_by_url(url=>$disk_parts[0],dev=>$diskhash->{target}->{dev}); #"/var/lib/xcat/vmnt/nfs_".$1."/$node/".$diskhash->{target}->{dev};
            } else { #currently, this would be a bare file to slap in as a disk
                $diskhash->{source}->{file} = $disk_parts[0];
            }
            #See if there are any other options. If not, increment suffidx because the already determined device node was used.
            #evidently, we support specifying explicitly how to target the system..
            if (@disk_parts gt 1) {
                my @disk_opts = split(/:/, $disk_parts[1]);
                if ($disk_opts[0] ne '') {
                    $diskhash->{target}->{dev} = $disk_opts[0];
                } else {
                    $suffidx++;
                }
                if ($disk_opts[1] eq 'cdrom') {
                    $diskhash->{device} = 'cdrom';
                }
            } else {
                $suffidx++;
            }
            push @returns, $diskhash;
        }
    }
    return \@returns;
}
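#getNodeUUID: return the node's uuid from the vpd table if present; otherwise
#generate one (MAC-derived when a MAC is known) and queue it for writing back
#to the vpd table via $updatetable.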
sub getNodeUUID {
    my $node = shift;
    if ($confdata->{vpd}->{$node}->[0] and $confdata->{vpd}->{$node}->[0]->{uuid}) {
        return $confdata->{vpd}->{$node}->[0]->{uuid};
    }
    if ($confdata->{mac}->{$node}->[0]->{mac}) { #a uuidv1 is possible, generate that for absolute uniqueness guarantee
        my $mac = $confdata->{mac}->{$node}->[0]->{mac};
        $mac =~ s/\|.*//;
        $mac =~ s/!.*//;
        $updatetable->{vpd}->{$node} = { uuid => xCAT::Utils::genUUID(mac => $mac) };
    } else {
        $updatetable->{vpd}->{$node} = { uuid => xCAT::Utils::genUUID() };
    }
    return $updatetable->{vpd}->{$node}->{uuid};
}
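#build_nicstruct: construct the <interface> list for the guest, pairing each
#configured bridge from vm.nics (default virbr0) with a MAC address from
#xCAT::VMCommon::getMacAddresses and a NIC model (default e1000).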
sub build_nicstruct {
    my $rethash;
    my $node = shift;
    my @nics = ();
    if ($confdata->{vm}->{$node}->[0]->{nics}) {
        @nics = split /,/, $confdata->{vm}->{$node}->[0]->{nics};
    } else {
        @nics = ('virbr0');
    }
    my @macs = xCAT::VMCommon::getMacAddresses($confdata, $node, scalar @nics);
    my @rethashes;
    foreach (@macs) {
        my $rethash;
        my $nic = shift @nics;
        my $type = 'e1000'; #better default fake nic than rtl8139, relevant to most
        unless ($nic) {
            last; #Don't want to have multiple vnics tied to the same switch
        }
        $nic =~ s/.*:\/\///; #the detail of how the bridge was built is of no
        #interest to this segment of code
        if ($confdata->{vm}->{$node}->[0]->{nicmodel}) {
            $type = $confdata->{vm}->{$node}->[0]->{nicmodel};
        }
        if ($nic =~ /=/) {
            ($nic, $type) = split /=/, $nic, 2;
        }
        $rethash->{type} = 'bridge';
        $rethash->{mac}->{address} = $_;
        $rethash->{source}->{bridge} = $nic;
        $rethash->{model}->{type} = $type;
        push @rethashes, $rethash;
    }
    return \@rethashes;
}
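#getUnits: convert a size that may carry a trailing unit letter (k/m/g) into
#bytes divided by $divisor; a unit letter on $amount overrides $defunit.
#For example, getUnits(512, "M", 1024) and getUnits("512M", "K", 1024) both
#return 524288 (512 MiB expressed in KiB, as libvirt memory elements expect).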
sub getUnits {
    my $amount = shift;
    my $defunit = shift;
    my $divisor = shift;
    unless ($divisor) {
        $divisor = 1;
    }
    if ($amount =~ /(\D)$/) { #If a unit letter is given on the amount, it overrides the default unit
        $defunit = $1;
        chop $amount;
    }
    if ($defunit =~ /k/i) {
        return $amount * 1024 / $divisor;
    } elsif ($defunit =~ /m/i) {
        return $amount * 1048576 / $divisor;
    } elsif ($defunit =~ /g/i) {
        return $amount * 1073741824 / $divisor;
    }
}
sub build_xmldesc {
    my $node = shift;
    my %args = @_;
    my $cdloc = $args{cd};
    my %xtree = ();
    $xtree{type} = 'kvm';
    $xtree{name}->{content} = $node;
    $xtree{uuid}->{content} = getNodeUUID($node);
    $xtree{os} = build_oshash();
    if ($args{memory}) {
        $xtree{memory}->{content} = getUnits($args{memory}, "M", 1024);
        if ($confdata->{vm}->{$node}->[0]->{memory}) {
            $updatetable->{vm}->{$node}->{memory} = $args{memory};
        }
    } elsif (defined $confdata->{vm}->{$node}->[0]->{memory}) {
        $xtree{memory}->{content} = getUnits($confdata->{vm}->{$node}->[0]->{memory}, "M", 1024);
    } else {
        $xtree{memory}->{content} = 524288;
    }
    if ($args{cpus}) {
        $xtree{vcpu}->{content} = $args{cpus};
        if ($confdata->{vm}->{$node}->[0]->{cpus}) {
            $updatetable->{vm}->{$node}->{cpus} = $args{cpus};
        }
    } elsif (defined $confdata->{vm}->{$node}->[0]->{cpus}) {
        $xtree{vcpu}->{content} = $confdata->{vm}->{$node}->[0]->{cpus};
    } else {
        $xtree{vcpu}->{content} = 1;
    }
    if (defined($confdata->{vm}->{$node}->[0]->{clockoffset})) {
        #If user requested a specific behavior, give it
        $xtree{clock}->{offset} = $confdata->{vm}->{$node}->[0]->{clockoffset};
    } else {
        #Otherwise, only do local time for things that look MS
        if (defined($confdata->{nodetype}->{$node}->[0]->{os}) and $confdata->{nodetype}->{$node}->[0]->{os} =~ /win.*/) {
            $xtree{clock}->{offset} = 'localtime';
        } else { #For everyone else, utc is preferred generally
            $xtree{clock}->{offset} = 'utc';
        }
    }
    $xtree{features}->{pae} = {};
    $xtree{features}->{acpi} = {};
    $xtree{features}->{apic} = {};
    $xtree{features}->{content} = "\n";
    $xtree{devices}->{disk} = build_diskstruct($cdloc);
    $xtree{devices}->{interface} = build_nicstruct($node);
    #use content to force xml simple to not make model the 'name' of video
    if (defined($confdata->{vm}->{$node}->[0]->{vidmodel})) {
        my $model = $confdata->{vm}->{$node}->[0]->{vidmodel};
        my $vram = '8192';
        $xtree{devices}->{video} = [ { 'content' => '', 'model' => { type => $model, vram => 8192 } } ];
    } else {
        $xtree{devices}->{video} = [ { 'content' => '', 'model' => { type => 'vga', vram => 8192 } } ];
    }
    $xtree{devices}->{input}->{type} = 'tablet';
    $xtree{devices}->{input}->{bus} = 'usb';
    if (defined($confdata->{vm}->{$node}->[0]->{vidproto})) {
        $xtree{devices}->{graphics}->{type} = $confdata->{vm}->{$node}->[0]->{vidproto};
    } else {
        $xtree{devices}->{graphics}->{type} = 'vnc';
    }
    $xtree{devices}->{graphics}->{autoport} = 'yes';
    $xtree{devices}->{graphics}->{listen} = '0.0.0.0';
    $xtree{devices}->{graphics}->{password} = genpassword(16);
    $xtree{devices}->{sound}->{model} = 'ac97';
    $xtree{devices}->{console}->{type} = 'pty';
    $xtree{devices}->{console}->{target}->{port} = '1';
    return XMLout(\%xtree, RootName => "domain");
}
sub refresh_vm {
    my $dom = shift;
    my $newxml = $dom->get_xml_description();
    $updatetable->{kvm_nodedata}->{$node}->{xml} = $newxml;
    $newxml = XMLin($newxml);
    my $vidport = $newxml->{devices}->{graphics}->{port};
    my $vidproto = $newxml->{devices}->{graphics}->{type};
    my $stty = $newxml->{devices}->{console}->{tty};
    #$updatetable->{vm}->{$node}={vncport=>$vncport,textconsole=>$stty};
    #$vmtab->setNodeAttribs($node,{vncport=>$vncport,textconsole=>$stty});
    return { vidport => $vidport, textconsole => $stty, vidproto => $vidproto };
}
sub getcons {
    my $node = shift();
    my $type = shift();
    my $dom;
    eval {
        $dom = $hypconn->get_domain_by_name($node);
    };
    unless ($dom) {
        return 1, "Unable to query running VM";
    }
    my $consdata = refresh_vm($dom);
    my $hyper = $confdata->{vm}->{$node}->[0]->{host};
    if ($type eq "text") {
        my $serialspeed;
        if ($confdata->{nodehm}) {
            $serialspeed = $confdata->{nodehm}->{$node}->[0]->{serialspeed};
        }
        my $sconsparms = { node => [ { name => [$node] } ] };
        $sconsparms->{node}->[0]->{sshhost} = [$hyper];
        $sconsparms->{node}->[0]->{psuedotty} = [$consdata->{textconsole}];
        $sconsparms->{node}->[0]->{baudrate} = [$serialspeed];
        return (0, $sconsparms);
    } elsif ($type eq "vid") {
        my $domxml = $dom->get_xml_description();
        my $parseddom = $parser->parse_string($domxml);
        my ($graphicsnode) = $parseddom->findnodes("//graphics");
        my $tpasswd = genpassword(16);
        my $validto = POSIX::strftime("%Y-%m-%dT%H:%M:%S", gmtime(time() + 60));
        $graphicsnode->setAttribute("passwd", $tpasswd);
        $graphicsnode->setAttribute("passwdValidTo", $validto);
        $dom->update_device($graphicsnode->toString());
        #$dom->update_device("<graphics type='".$consdata->{vidproto}."' passwd='$tpasswd' passwdValidTo='$validto' autoport='yes'/>");
        $consdata->{password} = $tpasswd;
        $consdata->{server} = $hyper;
        return $consdata;
        #return (0,{$consdata->{vidproto}.'@'.$hyper.":".$consdata->{vidport}); #$consdata->{vncport});
    }
}
sub getrvidparms {
    my $node = shift;
    my $location = getcons($node, "vid");
    unless (ref $location) {
        return (1, "Error: Unable to determine rvid destination for $node (appears VM is off)");
    }
    my @output = (
        "method: kvm"
    );
    foreach (keys %$location) {
        push @output, $_ . ":" . $location->{$_};
    }
    return 0, @output;
}
my %cached_noderanges;
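#pick_target: choose a migration/placement target for the node from
#vm.migrationdest, skipping the current host, offline or blacklisted
#hypervisors, and preferring the candidate with the most free memory (free
#memory figures are cached in %hypstats when $addmemory is supplied).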
sub pick_target {
    my $node = shift;
    my $addmemory = shift;
    my $target;
    my $mostfreememory = undef;
    my $currentfreememory;
    my $candidates = $confdata->{vm}->{$node}->[0]->{migrationdest};
    my $currhyp = $confdata->{vm}->{$node}->[0]->{host};
    #caching strategy is implicit on whether $addmemory is passed.
    unless ($candidates) {
        return undef;
    }
    my @fosterhyps; #noderange is relatively expensive, and generally we only will have a few distinct noderange descriptions to contend with in a mass adoption, so cache each one for reuse across pick_target() calls
    if (defined $cached_noderanges{$candidates}) {
        @fosterhyps = @{ $cached_noderanges{$candidates} };
    } else {
        @fosterhyps = noderange($candidates);
        $cached_noderanges{$candidates} = \@fosterhyps;
    }
    foreach (@fosterhyps) {
        my $targconn;
        my $cand = $_;
        if ($_ eq $currhyp) { next; } #skip current node
        if ($offlinehyps{$_}) { next; } #skip already offlined nodes
        if (grep { "$_" eq $cand } @destblacklist) { next; } #skip blacklisted destinations
        if ($addmemory and defined $hypstats{$_}->{freememory}) { #only use cached results when addmemory suggests caching can make sense
            $currentfreememory = $hypstats{$_}->{freememory};
        } else {
            if (not nodesockopen($_, 22)) { $offlinehyps{$_} = 1; next; } #skip unusable destinations
            eval { #Sys::Virt has bugs that cause it to die out in weird ways some times, contain it here
                $targconn = Sys::Virt->new(uri => "qemu+ssh://root@" . $_ . "/system?no_tty=1&netcat=nc");
            };
            unless ($targconn) {
                eval { #Sys::Virt has bugs that cause it to die out in weird ways some times, contain it here
                    $targconn = Sys::Virt->new(uri => "qemu+ssh://root@" . $_ . "/system?no_tty=1");
                };
            }
            unless ($targconn) { next; } #skip unreachable destinations
            $currentfreememory = $targconn->get_node_info()->{memory};
            foreach ($targconn->list_domains()) {
                if ($_->get_name() eq 'Domain-0') { next; } #Dom0 memory usage is elastic, we are interested in HVM DomU memory, which is inelastic
                $currentfreememory -= $_->get_info()->{memory};
            }
            $hypstats{$cand}->{freememory} = $currentfreememory;
        }
        if ($addmemory and $addmemory->{$_}) {
            $currentfreememory -= $addmemory->{$_};
        }
        if (not defined($mostfreememory)) {
            $mostfreememory = $currentfreememory;
            $target = $_;
        } elsif ($currentfreememory > $mostfreememory) {
            $mostfreememory = $currentfreememory;
            $target = $_;
        }
    }
    return $target;
}
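#migrate: live-migrate the guest to $targ (or to pick_target's choice),
#ensuring any required storage pools exist on the destination first, then
#nudging the ethernet fabric and recording the new host in the vm table.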
sub migrate {
    $node = shift();
    my $targ = shift();
    if ($offlinevms{$node}) {
        return power("on");
    }
    #TODO: currently, we completely serialize migration events.  Some IO fabrics can facilitate concurrent migrations
    #One trivial example is an ethernet port aggregation where a single conversation may likely be unable to utilize all the links
    #because traffic is balanced by a mac address hashing algorithm, but talking to several hypervisors would have
    #distinct peers that can be balanced more effectively.
    #The downside is that migration is sufficiently slow that a lot can change in the intervening time on a target hypervisor, but
    #this should not be an issue if:
    #xCAT is the only path a configuration is using to make changes in the virtualization stack
    #xCAT implements a global semaphore mechanism that this plugin can use to assure migration targets do not change by our own hand..
    #failing that.. flock.
    unless ($targ) {
        $targ = pick_target($node);
    }
    unless ($targ) {
        return (1, "Unable to identify a suitable target host for guest $node");
    }
    if ($use_xhrm) {
        xhrm_satisfy($node, $targ);
    }
    my $prevhyp;
    my $target = "qemu+ssh://root@" . $targ . "/system?no_tty=1";
    my $currhyp = "qemu+ssh://root@";
    if ($confdata->{vm}->{$node}->[0]->{host}) {
        $prevhyp = $confdata->{vm}->{$node}->[0]->{host};
        $currhyp .= $prevhyp;
    } else {
        return (1, "Unable to find current location of $node");
    }
    $currhyp .= "/system?no_tty=1";
    if ($currhyp eq $target) {
        return (0, "Guest is already on host $targ");
    }
    my $srchypconn;
    my $desthypconn;
    unless ($offlinehyps{$prevhyp} or nodesockopen($prevhyp, 22)) {
        $offlinehyps{$prevhyp} = 1;
    }
    my $srcnetcatadd = "&netcat=nc";
    unless ($offlinehyps{$prevhyp}) {
        eval { #Contain Sys::Virt bugs
            $srchypconn = Sys::Virt->new(uri => "qemu+ssh://root@" . $prevhyp . "/system?no_tty=1$srcnetcatadd");
        };
        unless ($srchypconn) {
            $srcnetcatadd = "";
            eval { #Contain Sys::Virt bugs
                $srchypconn = Sys::Virt->new(uri => "qemu+ssh://root@" . $prevhyp . "/system?no_tty=1");
            };
        }
    }
    unless ($srchypconn) {
        return (1, "Unable to reach $prevhyp to perform operation of $node, use nodech to change vm.host if certain that no split-brain possibility exists");
    }
    unless ($offlinehyps{$targ} or nodesockopen($targ, 22)) {
        $offlinehyps{$targ} = 1;
    }
    my $destnetcatadd = "&netcat=nc";
    unless ($offlinehyps{$targ}) {
        eval { #Contain Sys::Virt bugs
            $desthypconn = Sys::Virt->new(uri => $target . $destnetcatadd);
        };
        unless ($desthypconn) {
            $destnetcatadd = "";
            eval { #Contain Sys::Virt bugs
                $desthypconn = Sys::Virt->new(uri => $target);
            };
        }
    }
    unless ($desthypconn) {
        return (1, "Unable to reach $targ to perform operation of $node, destination unusable.");
    }
    if (defined $confdata->{vm}->{$node}->[0]->{storage} and $confdata->{vm}->{$node}->[0]->{storage} =~ /^nfs:/) {
        #first, assure master is in place
        if ($confdata->{vm}->{$node}->[0]->{master}) {
            my $vmmastertab = xCAT::Table->new('vmmaster', -create => 0);
            my $masterent;
            if ($vmmastertab) {
                $masterent = $vmmastertab->getAttribs({ name => $confdata->{vm}->{$node}->[0]->{master} }, ['storage']);
            }
            if ($masterent and $masterent->{storage}) {
                foreach (split /,/, $masterent->{storage}) {
                    s/=.*//;
                    get_storage_pool_by_url($_, $desthypconn, $targ);
                }
            }
        }
        my $urls = $confdata->{vm}->{$node}->[0]->{storage} and $confdata->{vm}->{$node}->[0]->{storage};
        foreach (split /,/, $urls) {
            s/=.*//;
            get_storage_pool_by_url($_, $desthypconn, $targ);
        }
    }
    my $sock = IO::Socket::INET->new(Proto => 'udp');
    my $ipa = inet_aton($node);
    my $pa;
    if ($ipa) {
        $pa = sockaddr_in(7, $ipa); #UDP echo service, not needed to be actually
    }
    #serviced, we just want to trigger MAC move in the switch forwarding dbs
    my $nomadomain;
    eval {
        $nomadomain = $srchypconn->get_domain_by_name($node);
    };
    unless ($nomadomain) {
        return (1, "Unable to find $node on $prevhyp, vm.host may be incorrect or a split-brain condition, such as libvirt forgetting a guest due to restart or bug.");
    }
    my $newdom;
    my $errstr;
    eval {
        $newdom = $nomadomain->migrate($desthypconn, &Sys::Virt::Domain::MIGRATE_LIVE, undef, undef, 0);
    };
    if ($@) { $errstr = $@; }
    #TODO: If it looks like it failed to migrate, ensure the guest exists only in one place
    if ($errstr) {
        return (1, "Failed migration of $node from $prevhyp to $targ: $errstr");
    }
    unless ($newdom) {
        return (1, "Failed migration from $prevhyp to $targ");
    }
    if ($ipa) {
        system("arp -d $node"); #Make ethernet fabric take note of change
        send($sock, "dummy", 0, $pa); #UDP packet to force forwarding table update in switches, ideally a garp happened, but just in case...
    }
    #BTW, this should all be moot since the underlying kvm seems good about gratuitous traffic, but it shouldn't hurt anything
    refresh_vm($newdom);
    #The migration seems to have succeeded, but to be sure...
    close($sock);
    if ($desthypconn->get_domain_by_name($node)) {
        #$updatetable->{vm}->{$node}->{host} = $targ;
        unless ($vmtab) { $vmtab = new xCAT::Table('vm', -create => 1); }
        $vmtab->setNodeAttribs($node, { host => $targ });
        return (0, "migrated to $targ");
    } else { #This *should* not be possible
        return (1, "Failed migration from $prevhyp to $targ, despite normal looking run...");
    }
}
sub getpowstate {
    my $dom = shift;
    my $vmstat;
    if ($dom) {
        $vmstat = $dom->get_info;
    }
    if ($vmstat and $runningstates{$vmstat->{state}}) {
        return "on";
    } else {
        return "off";
    }
}
sub xhrm_satisfy {
    my $node = shift;
    my $hyp = shift;
    my $rc = 0;
    my @nics = ();
    my @storage = ();
    if ($confdata->{vm}->{$node}->[0]->{nics}) {
        @nics = split /,/, $confdata->{vm}->{$node}->[0]->{nics};
    }
    $rc |= system("scp $::XCATROOT/share/xcat/scripts/xHRM $hyp:/usr/bin");
    foreach (@nics) {
        s/=.*//; #this code cares not about the model of virtual nic
        my $nic = $_;
        my $vlanip;
        my $netmask;
        my $subnet;
        if ($nic =~ /^vl([\d]+)$/) {
            my $vlan = $1;
            my $nwtab = xCAT::Table->new("networks", -create => 0);
            if ($nwtab) {
                my $sent = $nwtab->getAttribs({ vlanid => "$vlan" }, 'net', 'mask');
                if ($sent and ($sent->{net})) {
                    $subnet = $sent->{net};
                    $netmask = $sent->{mask};
                }
                if (($subnet) && ($netmask)) {
                    my $hoststab = xCAT::Table->new("hosts", -create => 0);
                    if ($hoststab) {
                        my $tmp = $hoststab->getNodeAttribs($hyp, ['otherinterfaces']);
                        if (defined($tmp) && ($tmp) && $tmp->{otherinterfaces})
                        {
                            my $otherinterfaces = $tmp->{otherinterfaces};
                            my @itf_pairs = split(/,/, $otherinterfaces);
                            foreach (@itf_pairs) {
                                my ($name, $vip) = split(/:/, $_);
                                if (xCAT::NetworkUtils->ishostinsubnet($vip, $netmask, $subnet)) {
                                    $vlanip = $vip;
                                    last;
                                }
                            }
                        }
                    }
                }
            }
        }
        #print "nic=$nic\n";
        $rc |= system("ssh $hyp xHRM bridgeprereq $nic $vlanip $netmask");
        #TODO: surprise! there is relatively undocumented libvirt capability for this...
        #./tests/interfaceschemadata/ will have to do in lieu of documentation..
        #note that RHEL6 is where that party starts
        #of course, they don't have a clean 'migrate from normal interface to bridge' capability
        #consequently, would have to have some init script at least pre-bridge it up..
        #even then, may not be able to intelligently modify the bridge remotely, so may still not be feasible for our use..
        #this is sufficiently hard, punting to 2.6 at least..
    }
    return $rc;
}
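#makedom: start the guest.  Reuses saved XML from kvm_nodedata when available
#(running storage pool prereqs and reconfigvm first), otherwise builds a fresh
#definition via build_xmldesc, then sets a random graphics password and calls
#create_domain on the hypervisor connection.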
sub makedom {
    my $node = shift;
    my $cdloc = shift;
    my $xml = shift;
    my $dom;
    if (not $xml and $confdata->{kvmnodedata}->{$node} and $confdata->{kvmnodedata}->{$node}->[0] and $confdata->{kvmnodedata}->{$node}->[0]->{xml}) {
        #we do this to trigger storage prereq fixup
        if (defined $confdata->{vm}->{$node}->[0]->{storage} and ($confdata->{vm}->{$node}->[0]->{storage} =~ /^nfs:/ or $confdata->{vm}->{$node}->[0]->{storage} =~ /^dir:/)) {
            if ($confdata->{vm}->{$node}->[0]->{master}) {
                my $vmmastertab = xCAT::Table->new('vmmaster', -create => 0);
                my $masterent;
                if ($vmmastertab) {
                    $masterent = $vmmastertab->getAttribs({ name => $confdata->{vm}->{$node}->[0]->{master} }, ['storage']);
                }
                if ($masterent and $masterent->{storage}) {
                    foreach (split /,/, $masterent->{storage}) {
                        s/=.*//;
                        get_storage_pool_by_url($_);
                    }
                }
            }
            my $urls = $confdata->{vm}->{$node}->[0]->{storage} and $confdata->{vm}->{$node}->[0]->{storage};
            foreach (split /,/, $urls) {
                s/=.*//;
                get_storage_pool_by_url($_);
            }
        }
        $xml = $confdata->{kvmnodedata}->{$node}->[0]->{xml};
        my $newxml = reconfigvm($node, $xml);
        if ($newxml) {
            $xml = $newxml;
        }
    } elsif (not $xml) {
        $xml = build_xmldesc($node, cd => $cdloc);
    }
    my $parseddom = $parser->parse_string($xml);
    my ($graphics) = $parseddom->findnodes("//graphics");
    $graphics->setAttribute("passwd", genpassword(20));
    $graphics->setAttribute("listen", '0.0.0.0');
    $xml = $parseddom->toString();
    my $errstr;
    eval { $dom = $hypconn->create_domain($xml); };
    if ($@) { $errstr = $@; }
    if (ref $errstr) {
        $errstr = ":" . $errstr->{message};
    }
    if ($errstr) { return (undef, $errstr); }
    if ($dom) {
        refresh_vm($dom);
    }
    return $dom, undef;
}
sub createstorage {
    #svn rev 6638 held the older vintage of createstorage
    #print "createstorage called\n";
    my $filename = shift;
    my $mastername = shift;
    my $size = shift;
    my $cfginfo = shift;
    my $force = shift;
    #my $diskstruct = shift;
    my $node = $cfginfo->{node};
    my @flags = split /,/, $cfginfo->{virtflags};
    foreach (@flags) {
        if (/^imageformat=(.*)\z/) {
            $imgfmt = $1;
        } elsif (/^clonemethod=(.*)\z/) {
            $clonemethod = $1;
        }
    }
    my $mountpath;
    my $pathappend;
    #for nfs paths and qemu-img, we do the magic locally only for now
    my $basename;
    my $dirname;
    if ($mastername and $size) {
        return 1, "Can not specify both a master to clone and size(s)";
    }
    $filename =~ s/=(.*)//;
    my $model = $1;
    unless ($model) {
        #if not defined, model will stay undefined like above
        $model = $cfginfo->{storagemodel};
    }
    my $prefix = 'hd';
    if ($model eq 'scsi') {
        $prefix = 'sd';
    } elsif ($model eq 'virtio') {
        $prefix = 'vd';
    }
    my @suffixes = ('a', 'b', 'd' .. 'zzz');
    if ($filename =~ /^nfs:/ or $filename =~ /^dir:/) { #libvirt storage pool to be used for this
        my @sizes = split /,/, $size;
        foreach (@sizes) {
            get_filepath_by_url(url => $filename, dev => $prefix . shift(@suffixes), create => $_, force => $force);
        }
    } else {
        oldCreateStorage($filename, $mastername, $size, $cfginfo, $force);
    }
    my $masterserver;
    if ($mastername) { #cloning
    }
    if ($size) { #new volume
    }
}
sub oldCreateStorage {
    my $filename = shift;
    my $mastername = shift;
    my $size = shift;
    my $cfginfo = shift;
    my $force = shift;
    my $node = $cfginfo->{node};
    my @flags = split /,/, $cfginfo->{virtflags};
    foreach (@flags) {
        if (/^imageformat=(.*)\z/) {
            $imgfmt = $1;
        } elsif (/^clonemethod=(.*)\z/) {
            $clonemethod = $1;
        }
    }
    my $mountpath;
    my $pathappend;
    my $storageserver;
    #for nfs paths and qemu-img, we do the magic locally only for now
    my $basename;
    my $dirname;
    ($basename, $dirname) = fileparse($filename);
    unless ($storageserver) {
        if (-f $filename) {
            unless ($force) {
                return 1, "Storage already exists, delete manually or use --force";
            }
            unlink $filename;
        }
    }
    if ($storageserver and $mastername and $clonemethod eq 'reflink') {
        my $rc = system("ssh $storageserver mkdir -p $dirname");
        if ($rc) {
            return 1, "Unable to manage storage on remote server $storageserver";
        }
    } elsif ($storageserver) {
        my @mounts = `mount`;
        my $foundmount;
        foreach (@mounts) {
            if (/^$storageserver:$mountpath/) {
                chomp;
                s/^.* on (\S*) type nfs.*$/$1/;
                $dirname = $_;
                mkpath($dirname . $pathappend);
                $foundmount = 1;
                last;
            }
        }
        unless ($foundmount) {
            return 1, "qemu-img cloning requires that the management server have the directory $mountpath from $storageserver mounted";
        }
    } else {
        mkpath($dirname);
    }
    if ($mastername and $size) {
        return 1, "Can not specify both a master to clone and a size";
    }
    my $masterserver;
    if ($mastername) {
        unless ($mastername =~ /^\// or $mastername =~ /^nfs:/) {
            $mastername = $xCAT_plugin::kvm::masterdir . '/' . $mastername;
        }
        if ($mastername =~ m!nfs://([^/]*)(/.*\z)!) {
            $mastername = $2;
            $masterserver = $1;
        }
        if ($masterserver ne $storageserver) {
            return 1, "Not supporting cloning between $masterserver and $storageserver at this time, for now ensure master images and target VM images are on the same server";
        }
        my $rc;
        if ($clonemethod eq 'qemu-img') {
            my $dirn;
            my $filn;
            ($filn, $dirn) = fileparse($filename);
            chdir($dirn);
            $rc = system("qemu-img create -f qcow2 -b $mastername $filename");
        } elsif ($clonemethod eq 'reflink') {
            if ($storageserver) {
                $rc = system("ssh $storageserver cp --reflink $mastername $filename");
            } else {
                $rc = system("cp --reflink $mastername $filename");
            }
        }
        if ($rc) {
            return $rc, "Failure creating image $filename from $mastername";
        }
    }
    if ($size) {
        my $rc = system("qemu-img create -f $imgfmt $filename " . getUnits($size, "g", 1024));
        if ($rc) {
            return $rc, "Failure creating image $filename of size $size\n";
        }
    }
}
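#rinv: report guest inventory (uuid, cpu count, current and maximum memory,
#disks, and network adapters) from the live domain XML when the guest is on,
#or from the saved kvm_nodedata XML when it is off.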
sub rinv {
    shift;
    my $dom;
    eval {
        $dom = $hypconn->get_domain_by_name($node);
    };
    my $currstate = getpowstate($dom);
    my $currxml;
    if ($currstate eq 'on') {
        $currxml = $dom->get_xml_description();
    } else {
        $currxml = $confdata->{kvmnodedata}->{$node}->[0]->{xml};
    }
    unless ($currxml) {
        xCAT::SvrUtils::sendmsg([1, "VM does not appear to exist"], $callback, $node);
        return;
    }
    my $domain = $parser->parse_string($currxml);
    my $uuid = $domain->findnodes('/domain/uuid')->[0]->to_literal;
    xCAT::SvrUtils::sendmsg("UUID/GUID: $uuid", $callback, $node);
    my $cpus = $domain->findnodes('/domain/vcpu')->[0]->to_literal;
    xCAT::SvrUtils::sendmsg("CPUs: $cpus", $callback, $node);
    my $memnode = $domain->findnodes('/domain/currentMemory')->[0];
    my $maxmemnode = $domain->findnodes('/domain/memory')->[0];
    unless ($memnode) {
        $memnode = $maxmemnode;
    }
    if ($memnode) {
        my $mem = $memnode->to_literal;
        $mem = $mem / 1024;
        xCAT::SvrUtils::sendmsg("Memory: $mem MB", $callback, $node);
    }
    if ($maxmemnode) {
        my $maxmem = $maxmemnode->to_literal;
        $maxmem = $maxmem / 1024;
        xCAT::SvrUtils::sendmsg("Maximum Memory: $maxmem MB", $callback, $node);
    }
    invstorage($domain);
    invnics($domain);
}
sub get_storage_pool_by_volume {
2010-08-28 14:42:45 +00:00
my $ vol = shift ;
2010-08-30 15:54:22 +00:00
my $ path = $ vol - > get_path ( ) ;
return get_storage_pool_by_path ( $ path ) ;
}
sub get_storage_pool_by_path {
#attempts to get pool for a volume, returns false on failure
my $ file = shift ;
2010-08-28 14:42:45 +00:00
my $ pool ;
2010-08-30 19:41:37 +00:00
return eval {
2010-08-30 15:54:22 +00:00
my @ currpools = $ hypconn - > list_storage_pools ( ) ;
push @ currpools , $ hypconn - > list_defined_storage_pools ( ) ;
foreach $ pool ( @ currpools ) {
my $ parsedpool = $ parser - > parse_string ( $ pool - > get_xml_description ( ) ) ;
my $ currpath = $ parsedpool - > findnodes ( "/pool/target/path/text()" ) - > [ 0 ] - > data ;
if ( $ currpath eq $ file or $ file =~ /^$currpath\/[^\/]*$/ ) {
return $ pool ;
}
}
2010-08-30 19:41:37 +00:00
return undef ;
2010-08-30 15:54:22 +00:00
# $pool = $hypconn->get_storage_pool_by_uuid($pooluuid);
2010-08-28 14:42:45 +00:00
} ;
}
2010-08-26 18:32:44 +00:00
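#invstorage: walk the disk devices in the domain XML and report each disk's device name, bus address,
#allocation/capacity in MB, and its pool/volume location (falling back to the raw file path)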
sub invstorage {
my $ domain = shift ;
my @ disks = $ domain - > findnodes ( '/domain/devices/disk' ) ;
my $ disk ;
foreach $ disk ( @ disks ) {
my $ name = $ disk - > findnodes ( './target' ) - > [ 0 ] - > getAttribute ( "dev" ) ;
my $ xref = "" ;
my $ addr = $ disk - > findnodes ( './address' ) - > [ 0 ] ;
if ( $ addr ) {
if ( $ name =~ /^vd/ ) {
$ xref = " (v" . $ addr - > getAttribute ( "bus" ) . ":" . $ addr - > getAttribute ( 'slot' ) . "." . $ addr - > getAttribute ( "function" ) . ")" ;
$ xref =~ s/0x//g ;
} else {
$ xref = " (d" . $ addr - > getAttribute ( "controller" ) . ":" . $ addr - > getAttribute ( "bus" ) . ":" . $ addr - > getAttribute ( "unit" ) . ")" ;
}
}
2011-05-31 20:29:59 +00:00
my @ candidatenodes = $ disk - > findnodes ( './source' ) ;
unless ( scalar @ candidatenodes ) {
next ;
}
my $ file = $ candidatenodes [ 0 ] - > getAttribute ( 'file' ) ;
2010-08-27 13:27:17 +00:00
#we'll attempt to map file path to pool name and volume name
#fallback to just reporting filename if not feasible
#libvirt lacks a direct way to look up a storage pool by path, so get_storage_pool_by_path walks the defined pools and matches on their target paths
2010-08-26 18:32:44 +00:00
$ file =~ m !/([^/]*)/($node\..*)\z! ;
my $ volname = $ 2 ;
2010-08-27 13:27:17 +00:00
my $ vollocation = $ file ;
eval {
2010-08-30 15:54:22 +00:00
my $ pool = get_storage_pool_by_path ( $ file ) ;
2010-08-27 13:27:17 +00:00
my $ poolname = $ pool - > get_name ( ) ;
$ vollocation = "[$poolname] $volname" ;
} ;
#at least I get to skip the whole pool mess here
my $ vol = $ hypconn - > get_storage_volume_by_path ( $ file ) ;
2010-08-26 18:32:44 +00:00
my $ size ;
if ( $ vol ) {
my % info = % { $ vol - > get_info ( ) } ;
2010-08-28 01:31:51 +00:00
if ( $ info { allocation } and $ info { capacity } ) {
$ size = $ info { allocation } ;
2010-08-26 18:32:44 +00:00
$ size = $ size / 1048576 ; #convert to MB
2010-09-27 17:56:50 +00:00
$ size = sprintf ( "%.3f" , $ size ) ;
2010-08-28 01:31:51 +00:00
$ size . = "/" . ( $ info { capacity } / 1048576 ) ;
2010-08-26 18:32:44 +00:00
}
}
$ callback - > ( {
node = > {
name = > $ node ,
data = > {
desc = > "Disk $name$xref" ,
2010-08-27 13:27:17 +00:00
contents = > "$size MB @ $vollocation" ,
2010-08-26 18:32:44 +00:00
}
}
} ) ;
}
}
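#invnics: report each network interface in the domain XML with its MAC address and, when present, its PCI bus:slot.function location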
sub invnics {
my $ domain = shift ;
my @ nics = $ domain - > findnodes ( '/domain/devices/interface' ) ;
my $ nic ;
foreach $ nic ( @ nics ) {
my $ mac = $ nic - > findnodes ( './mac' ) - > [ 0 ] - > getAttribute ( 'address' ) ;
my $ addr = $ nic - > findnodes ( './address' ) - > [ 0 ] ;
my $ loc ;
if ( $ addr ) {
my $ bus = $ addr - > getAttribute ( 'bus' ) ;
$ bus =~ s/^0x// ;
my $ slot = $ addr - > getAttribute ( 'slot' ) ;
$ slot =~ s/^0x// ;
my $ function = $ addr - > getAttribute ( 'function' ) ;
$ function =~ s/^0x// ;
$ loc = " at $bus:$slot.$function" ;
}
$ callback - > ( {
node = > {
name = > $ node ,
data = > {
desc = > "Network adapter$loc" ,
contents = > $ mac ,
}
}
} ) ;
}
}
2010-08-26 14:01:54 +00:00
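#rmvm: remove a guest definition; -f destroys a running guest first, -p also deletes its backing storage volumes.
#The node's stored XML is queued for deletion from the kvm_nodedata table.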
sub rmvm {
shift ;
@ ARGV = @ _ ;
my $ force ;
my $ purge ;
GetOptions (
'f' = > \ $ force ,
'p' = > \ $ purge ,
) ;
my $ dom ;
eval {
$ dom = $ hypconn - > get_domain_by_name ( $ node ) ;
} ;
my $ currstate = getpowstate ( $ dom ) ;
my $ currxml ;
if ( $ currstate eq 'on' ) {
if ( $ force ) {
$ currxml = $ dom - > get_xml_description ( ) ;
$ dom - > destroy ( ) ;
} else {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Cannot rmvm active guest (use -f argument to force)" ] , $ callback , $ node ) ;
return ;
}
} else {
$ currxml = $ confdata - > { kvmnodedata } - > { $ node } - > [ 0 ] - > { xml } ;
}
if ( $ purge and $ currxml ) {
my $ deadman = $ parser - > parse_string ( $ currxml ) ;
my @ purgedisks = $ deadman - > findnodes ( "/domain/devices/disk/source" ) ;
my $ disk ;
foreach $ disk ( @ purgedisks ) {
my $ file = $ disk - > getAttribute ( "file" ) ;
2010-08-27 13:27:17 +00:00
my $ vol = $ hypconn - > get_storage_volume_by_path ( $ file ) ;
if ( $ vol ) {
$ vol - > delete ( ) ;
2010-08-26 14:01:54 +00:00
}
}
}
$ updatetable - > { kvm_nodedata } - > { '!*XCATNODESTODELETE*!' } - > { $ node } = 1 ;
}
2010-07-06 20:52:24 +00:00
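#chvm: modify a guest; handles adding disks (-a), purging disks (-p), changing cpu count and memory,
#and inserting/ejecting virtual optical media, against either the live domain or the stored offline XML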
sub chvm {
shift ;
my @ addsizes ;
my % resize ;
my $ cpucount ;
my @ purge ;
my @ derefdisks ;
my $ memory ;
2011-05-02 13:40:46 +00:00
my $ cdrom ;
my $ eject ;
2010-07-06 20:52:24 +00:00
@ ARGV = @ _ ;
require Getopt::Long ;
2010-07-21 15:23:37 +00:00
GetOptions (
2010-07-06 20:52:24 +00:00
"a=s" = > \ @ addsizes ,
"d=s" = > \ @ derefdisks ,
"mem=s" = > \ $ memory ,
2011-05-06 14:22:18 +00:00
"optical|cdrom=s" = > \ $ cdrom ,
2011-05-02 13:40:46 +00:00
"eject" = > \ $ eject ,
2010-09-17 14:37:20 +00:00
"cpus=s" = > \ $ cpucount ,
2010-07-06 20:52:24 +00:00
"p=s" = > \ @ purge ,
"resize=s%" = > \ % resize ,
) ;
2010-09-21 19:35:36 +00:00
if ( @ derefdisks ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Detach without purge TODO for kvm" ] , $ callback , $ node ) ;
return ;
}
if ( @ addsizes and @ purge ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Currently adding and purging concurrently is not supported" ] , $ callback , $ node ) ;
return ;
}
2010-07-06 20:52:24 +00:00
my % useddisks ;
2010-09-21 19:35:36 +00:00
my $ dom ;
eval {
$ dom = $ hypconn - > get_domain_by_name ( $ node ) ;
} ;
my $ vmxml ;
if ( $ dom ) {
$ vmxml = $ dom - > get_xml_description ( ) ;
} else {
$ vmxml = $ confdata - > { kvmnodedata } - > { $ node } - > [ 0 ] - > { xml } ;
}
my $ currstate = getpowstate ( $ dom ) ;
2010-07-06 20:52:24 +00:00
if ( defined $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { storage } ) {
my $ store ;
foreach $ store ( split /\|/ , $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { storage } ) {
$ store =~ s/,.*// ;
$ store =~ s/=.*// ;
if ( $ store =~ /^nfs:\/\// ) {
my % disks = % { get_multiple_paths_by_url ( url = > $ store , node = > $ node ) } ;
foreach ( keys % disks ) {
2010-07-30 19:12:45 +00:00
$ useddisks { $ disks { $ _ } - > { device } } = 1 ;
2010-07-06 20:52:24 +00:00
}
}
}
}
if ( @ addsizes ) { #need to add disks, first identify used devnames
my @ diskstoadd ;
my $ location = $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { storage } ;
$ location =~ s/.*\|// ; #use the rightmost location for making new devices
$ location =~ s/,.*// ; #no comma specified parameters are valid
$ location =~ s/=(.*)// ; #store model if specified here
my $ model = $ 1 ;
2010-08-25 15:42:36 +00:00
unless ( $ model ) {
#if vm.storagemodel is also unset, $model stays undefined
$ model = $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { storagemodel }
}
2010-07-06 20:52:24 +00:00
my $ prefix = 'hd' ;
if ( $ model eq 'scsi' ) {
$ prefix = 'sd' ;
} elsif ( $ model eq 'virtio' ) {
$ prefix = 'vd' ;
}
2010-08-26 13:16:24 +00:00
if ( $ prefix eq 'hd' and $ currstate eq 'on' ) {
xCAT::SvrUtils:: sendmsg ( "VM must be powered off to add IDE drives" , $ callback , $ node ) ;
next ;
}
2010-07-06 20:52:24 +00:00
my @ suffixes ;
if ( $ prefix eq 'hd' ) {
2010-08-26 15:24:48 +00:00
@ suffixes = ( 'a' , 'b' , 'd' .. 'zzz' ) ;
2010-07-06 20:52:24 +00:00
} else {
2010-08-26 15:24:48 +00:00
@ suffixes = ( 'a' .. 'zzz' ) ;
2010-07-06 20:52:24 +00:00
}
my @ newsizes ;
foreach ( @ addsizes ) {
push @ newsizes , split /,/ , $ _ ;
}
foreach ( @ newsizes ) {
my $ dev ;
do {
$ dev = $ prefix . shift ( @ suffixes ) ;
} while ( $ useddisks { $ dev } ) ;
#ok, now I need a volume created to attach
push @ diskstoadd , get_filepath_by_url ( url = > $ location , dev = > $ dev , create = > $ _ ) ;
}
#now that the volumes are made, must build xml for each and attempt attach if and only if the VM is live
2010-08-26 13:16:24 +00:00
foreach ( @ diskstoadd ) {
my $ suffix ;
my $ format ;
if ( /^[^\.]*\.([^\.]*)\.([^\.]*)/ ) {
$ suffix = $ 1 ;
$ format = $ 2 ;
} elsif ( /^[^\.]*\.([^\.]*)/ ) {
$ suffix = $ 1 ;
$ format = 'raw' ;
}
my $ bus ;
if ( $ suffix =~ /^sd/ ) {
$ bus = 'scsi' ;
} elsif ( $ suffix =~ /hd/ ) {
$ bus = 'ide' ;
} elsif ( $ suffix =~ /vd/ ) {
$ bus = 'virtio' ;
}
my $ xml = "<disk type='file' device='disk'><driver name='qemu' type='$format'/><source file='$_'/><target dev='$suffix' bus='$bus'/></disk>" ;
if ( $ currstate eq 'on' ) { #attempt live attach
2010-08-26 15:24:48 +00:00
eval {
2010-08-26 13:16:24 +00:00
$ dom - > attach_device ( $ xml ) ;
2010-08-26 15:24:48 +00:00
} ;
if ( $@ ) {
my $ err = $@ ;
if ( $ err =~ /No more available PCI addresses/ ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Exhausted Virtio limits trying to add $_" ] , $ callback , $ node ) ;
} else {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to attach $_ because of " . $ err ] , $ callback , $ node ) ;
}
my $ file = $ _ ;
2010-08-27 13:27:17 +00:00
my $ vol = $ hypconn - > get_storage_volume_by_path ( $ file ) ;
if ( $ vol ) {
$ vol - > delete ( ) ;
2010-08-26 15:24:48 +00:00
}
}
2010-09-21 19:35:36 +00:00
$ vmxml = $ dom - > get_xml_description ( ) ;
2010-08-26 13:16:24 +00:00
} elsif ( $ confdata - > { kvmnodedata } - > { $ node } - > [ 0 ] - > { xml } ) {
my $ vmxml = $ confdata - > { kvmnodedata } - > { $ node } - > [ 0 ] - > { xml } ;
my $ disknode = $ parser - > parse_balanced_chunk ( $ xml ) ;
my $ vmdoc = $ parser - > parse_string ( $ vmxml ) ;
my $ devicesnode = $ vmdoc - > findnodes ( "/domain/devices" ) - > [ 0 ] ;
$ devicesnode - > appendChild ( $ disknode ) ;
2010-09-21 19:35:36 +00:00
$ vmxml = $ vmdoc - > toString ( ) ;
2010-07-21 15:23:37 +00:00
}
2010-09-21 19:35:36 +00:00
$ updatetable - > { kvm_nodedata } - > { $ node } - > { xml } = $ vmxml ;
2010-07-06 20:52:24 +00:00
}
2010-07-21 20:33:50 +00:00
} elsif ( @ purge ) {
2010-08-25 21:04:20 +00:00
my $ dom ;
eval {
$ dom = $ hypconn - > get_domain_by_name ( $ node ) ;
} ;
my $ vmxml ;
if ( $ dom ) {
$ vmxml = $ dom - > get_xml_description ( ) ;
} else {
$ vmxml = $ confdata - > { kvmnodedata } - > { $ node } - > [ 0 ] - > { xml } ;
}
2010-07-21 20:33:50 +00:00
my $ currstate = getpowstate ( $ dom ) ;
2010-08-25 21:04:20 +00:00
my @ disklist = get_disks_by_userspecs ( \ @ purge , $ vmxml , 'returnmoddedxml' ) ;
my $ moddedxml = shift @ disklist ;
foreach ( @ disklist ) {
2010-07-21 20:33:50 +00:00
my $ devxml = $ _ - > [ 0 ] ;
my $ file = $ _ - > [ 1 ] ;
$ file =~ m !/([^/]*)/($node\..*)\z! ;
#first, detach the device.
eval {
2010-08-12 18:30:41 +00:00
if ( $ currstate eq 'on' ) {
$ dom - > detach_device ( $ devxml ) ;
2010-09-21 19:35:36 +00:00
$ vmxml = $ dom - > get_xml_description ( ) ;
2010-08-12 18:30:41 +00:00
} else {
2010-09-21 19:35:36 +00:00
$ vmxml = $ moddedxml ;
2010-08-12 18:30:41 +00:00
}
2010-09-21 19:35:36 +00:00
$ updatetable - > { kvm_nodedata } - > { $ node } - > { xml } = $ vmxml ;
2010-07-21 20:33:50 +00:00
} ;
if ( $@ ) {
2010-08-26 13:16:24 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to remove device" ] , $ callback , $ node ) ;
2010-07-21 20:33:50 +00:00
} else {
#if that worked, remove the disk..
2010-08-27 13:27:17 +00:00
my $ vol = $ hypconn - > get_storage_volume_by_path ( $ file ) ;
if ( $ vol ) {
$ vol - > delete ( ) ;
2010-07-21 20:33:50 +00:00
}
}
}
2010-09-21 19:35:36 +00:00
}
2011-05-02 13:40:46 +00:00
my $ newcdxml ;
if ( $ cdrom ) {
my $ cdpath ;
if ( $ cdrom =~ m !://! ) {
my $ url = $ cdrom ;
$ url =~ s!([^/]+)\z!! ;
my $ imagename = $ 1 ;
my $ poolobj = get_storage_pool_by_url ( $ url ) ;
unless ( $ poolobj ) { die "Could not get storage pool for $url" ; }
my $ poolxml = $ poolobj - > get_xml_description ( ) ; #yes, I have to XML parse for even this...
my $ parsedpool = $ parser - > parse_string ( $ poolxml ) ;
$ cdpath = $ parsedpool - > findnodes ( "/pool/target/path/text()" ) - > [ 0 ] - > data ;
$ cdpath . = "/" . $ imagename ;
} else {
if ( $ cdrom =~ m !^/dev/! ) {
die "TODO: device pass through if anyone cares" ;
} elsif ( $ cdrom =~ m !^/! ) { #full path... I guess
$ cdpath = $ cdrom ;
} else {
die "TODO: relative paths, use client cwd as hint?" ;
}
}
unless ( $ cdpath ) { die "unable to understand cd path specification" ; }
$ newcdxml = "<disk type='file' device='cdrom'><source file='$cdpath'/><target dev='hdc'/><readonly/></disk>" ;
} elsif ( $ eject ) {
$ newcdxml = "<disk type='file' device='cdrom'><target dev='hdc'/><readonly/></disk>" ;
}
if ( $ newcdxml ) {
if ( $ currstate eq 'on' ) {
$ dom - > attach_device ( $ newcdxml ) ;
$ vmxml = $ dom - > get_xml_description ( ) ;
} else {
unless ( $ vmxml ) {
$ vmxml = $ confdata - > { kvmnodedata } - > { $ node } - > [ 0 ] - > { xml } ;
}
my $ domparsed = $ parser - > parse_string ( $ vmxml ) ;
2011-05-02 15:06:16 +00:00
my $ candidatenodes = $ domparsed - > findnodes ( "//disk[\@device='cdrom']" ) ;
2011-05-02 13:40:46 +00:00
if ( scalar ( @$ candidatenodes ) != 1 ) {
die "shouldn't be possible, should only have one cdrom" ;
}
my $ newcd = $ parser - > parse_balanced_chunk ( $ newcdxml ) ;
$ candidatenodes - > [ 0 ] - > replaceNode ( $ newcd ) ;
2011-05-02 15:02:34 +00:00
my $ moddedxml = $ domparsed - > toString ;
if ( $ moddedxml ) {
$ vmxml = $ moddedxml ;
}
}
if ( $ vmxml ) {
$ updatetable - > { kvm_nodedata } - > { $ node } - > { xml } = $ vmxml ;
2011-05-02 13:40:46 +00:00
}
}
2011-05-31 20:29:59 +00:00
if ( $ cpucount or $ memory ) {
2010-09-21 19:35:36 +00:00
if ( $ currstate eq 'on' ) {
2011-05-31 20:29:59 +00:00
if ( $ cpucount ) { xCAT::SvrUtils:: sendmsg ( [ 1 , "Hot add of cpus not supported (VM must be powered down to successfully change)" ] , $ callback , $ node ) ; }
2010-09-21 19:35:36 +00:00
if ( $ cpucount ) {
#$dom->set_vcpus($cpucount); this didn't work out as well as I hoped..
#xCAT::SvrUtils::sendmsg([1,"Hot add of cpus not supported"],$callback,$node);
}
if ( $ memory ) {
2011-05-31 20:29:59 +00:00
eval {
$ dom - > set_memory ( getUnits ( $ memory , "M" , 1024 ) ) ;
} ;
if ( $@ ) {
if ( $@ =~ /cannot set memory higher/ ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to increase memory beyond current capacity (requires VM to be powered down to change)" ] , $ callback , $ node ) ;
}
}
$ vmxml = $ dom - > get_xml_description ( ) ;
if ( $ vmxml ) {
$ updatetable - > { kvm_nodedata } - > { $ node } - > { xml } = $ vmxml ;
}
2010-09-21 19:35:36 +00:00
}
} else { #offline xml edits
my $ parsed = $ parser - > parse_string ( $ vmxml ) ; #TODO: should only do this once, oh well
if ( $ cpucount ) {
$ parsed - > findnodes ( "/domain/vcpu/text()" ) - > [ 0 ] - > setData ( $ cpucount ) ;
}
if ( $ memory ) {
$ parsed - > findnodes ( "/domain/memory/text()" ) - > [ 0 ] - > setData ( getUnits ( $ memory , "M" , 1024 ) ) ;
my @ currmem = $ parsed - > findnodes ( "/domain/currentMemory/text()" ) ;
foreach ( @ currmem ) {
$ _ - > setData ( getUnits ( $ memory , "M" , 1024 ) ) ;
}
}
$ vmxml = $ parsed - > toString ;
$ updatetable - > { kvm_nodedata } - > { $ node } - > { xml } = $ vmxml ;
}
2010-07-21 20:33:50 +00:00
}
}
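#get_disks_by_userspecs: map user disk specifiers to [disk xml, backing file] pairs.
#Specs may be a device name (vda, sdb, hdc, ...), d<unit> or d<controller:bus:unit> for drive addresses,
#or v<slot>/v<bus:slot> for virtio PCI addresses.  With 'returnmoddedxml', the matched disks are also
#removed from a copy of the domain XML, which is returned as the first list element.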
sub get_disks_by_userspecs {
my $ specs = shift ;
my $ xml = shift ;
2010-08-25 21:04:20 +00:00
my $ returnmoddedxml = shift ;
2010-08-25 19:04:52 +00:00
my $ struct = XMLin ( $ xml , forcearray = > 1 ) ;
2010-08-25 21:04:20 +00:00
my $ dominf = $ parser - > parse_string ( $ xml ) ;
my @ disknodes = $ dominf - > findnodes ( '/domain/devices/disk' ) ;
2010-07-21 20:33:50 +00:00
my @ returnxmls ;
foreach my $ spec ( @$ specs ) {
2010-08-25 21:04:20 +00:00
my $ disknode ;
foreach $ disknode ( @ disknodes ) {
2010-07-21 20:33:50 +00:00
if ( $ spec =~ /^.d./ ) { #vda, hdb, sdc, etc, match by equality to target->{dev}
2010-08-25 21:04:20 +00:00
if ( $ disknode - > findnodes ( './target' ) - > [ 0 ] - > getAttribute ( "dev" ) eq $ spec ) {
push @ returnxmls , [ $ disknode - > toString ( ) , $ disknode - > findnodes ( './source' ) - > [ 0 ] - > getAttribute ( 'file' ) ] ;
if ( $ returnmoddedxml ) {
$ disknode - > parentNode - > removeChild ( $ disknode ) ;
}
2010-07-21 20:33:50 +00:00
}
} elsif ( $ spec =~ /^d(.*)/ ) { #delete by scsi unit number..
2010-08-26 18:32:44 +00:00
my $ loc = $ 1 ;
my $ addr = $ disknode - > findnodes ( './address' ) - > [ 0 ] ;
if ( $ loc =~ /:/ ) { #controller, bus, unit
my $ controller ;
my $ bus ;
my $ unit ;
( $ controller , $ bus , $ unit ) = split /:/ , $ loc ;
if ( hex ( $ addr - > getAttribute ( 'controller' ) ) == hex ( $ controller ) and hex ( $ addr - > getAttribute ( 'bus' ) ) == hex ( $ bus ) and hex ( $ addr - > getAttribute ( 'unit' ) ) == hex ( $ unit ) ) { #hex() each attribute so hex-formatted addresses compare correctly
push @ returnxmls , [ $ disknode - > toString ( ) , $ disknode - > findnodes ( './source' ) - > [ 0 ] - > getAttribute ( 'file' ) ] ;
if ( $ returnmoddedxml ) {
$ disknode - > parentNode - > removeChild ( $ disknode ) ;
}
}
} else { #match just on unit number, not helpful on ide as much generally, but whatever
if ( hex ( $ addr - > getAttribute ( 'unit' ) ) == hex ( $ loc ) ) {
push @ returnxmls , [ $ disknode - > toString ( ) , $ disknode - > findnodes ( './source' ) - > [ 0 ] - > getAttribute ( 'file' ) ] ;
if ( $ returnmoddedxml ) {
$ disknode - > parentNode - > removeChild ( $ disknode ) ;
}
}
}
} elsif ( $ spec =~ /^v(.*)/ ) { #virtio pci selector
my $ slot = $ 1 ;
$ slot =~ s/^(.*):// ; #strip the pci bus number prefix, captured for the comparison below
my $ bus = $ 1 ;
$ slot =~ s/\.0$// ;
my $ addr = $ disknode - > findnodes ( './address' ) - > [ 0 ] ;
if ( hex ( $ addr - > getAttribute ( 'slot' ) ) == hex ( $ slot ) and hex ( $ addr - > getAttribute ( 'bus' ) ) == hex ( $ bus ) ) {
2010-08-25 21:04:20 +00:00
push @ returnxmls , [ $ disknode - > toString ( ) , $ disknode - > findnodes ( './source' ) - > [ 0 ] - > getAttribute ( 'file' ) ] ;
if ( $ returnmoddedxml ) {
$ disknode - > parentNode - > removeChild ( $ disknode ) ;
}
2010-07-21 20:33:50 +00:00
}
} #other formats TBD
}
2010-07-06 20:52:24 +00:00
}
2010-08-25 21:04:20 +00:00
if ( $ returnmoddedxml ) { #there are list entries to delete
unshift @ returnxmls , $ dominf - > toString ( ) ;
}
2010-07-21 20:33:50 +00:00
return @ returnxmls ;
2010-07-06 20:52:24 +00:00
}
2010-08-30 19:41:37 +00:00
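#promote_vm_to_master: turn an existing (ideally shut down) VM into a master image: clone each of its
#volumes into the target pool under the master name, scrub the vm-specific uuid/name/mac data from the XML,
#and record the result in the vmmaster and kvm_masterdata tables.  Unless -d (detach) is given, the source
#volumes are then rebased on top of the new master copies.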
sub promote_vm_to_master {
my % args = @ _ ;
my $ target = $ args { target } ;
my $ force = $ args { force } ;
my $ detach = $ args { detach } ;
2010-09-27 17:56:50 +00:00
if ( $ target !~ m !://! ) { #if not a url, use same place as source
my $ sourcedir = $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { storage } ;
$ sourcedir =~ s/=.*// ;
$ sourcedir =~ s/,.*// ;
$ sourcedir =~ s!/\z!! ;
$ target = $ sourcedir . "/" . $ target ;
}
2010-08-30 19:41:37 +00:00
unless ( $ target =~ /^nfs:\/\// ) {
2010-09-27 17:56:50 +00:00
xCAT::SvrUtils:: sendmsg ( [ 1 , "KVM plugin only has nfs://<server>/<path>/<mastername> support for cloning at this moment" ] , $ callback , $ node ) ;
2010-08-30 19:41:37 +00:00
return ;
}
my $ dom ;
eval {
$ dom = $ hypconn - > get_domain_by_name ( $ node ) ;
} ;
if ( $ dom and not $ force ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "VM shut be shut down before attempting to clone (-f to copy unclean disks)" ] , $ callback , $ node ) ;
return ;
}
my $ xml ;
if ( $ dom ) {
$ xml = $ dom - > get_xml_description ( ) ;
$ detach = 1 ; #can't rebase if vm is on
} else {
$ xml = $ confdata - > { kvmnodedata } - > { $ node } - > [ 0 ] - > { xml } ;
}
unless ( $ xml ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "VM must be created before it can be cloned" ] , $ callback , $ node ) ;
return ;
}
my $ parsedxml = $ parser - > parse_string ( $ xml ) ;
2010-09-27 17:56:50 +00:00
my ( $ tmpnod ) = $ parsedxml - > findnodes ( '/domain/uuid/text()' ) ;
2010-09-22 19:16:08 +00:00
if ( $ tmpnod ) {
$ tmpnod - > setData ( "none" ) ; #get rid of the VM specific uuid
}
2010-08-30 19:41:37 +00:00
2010-09-27 17:56:50 +00:00
$ target =~ m !^(.*)/([^/]*)\z! ;
2010-08-30 19:41:37 +00:00
my $ directory = $ 1 ;
my $ mastername = $ 2 ;
2010-09-22 19:16:08 +00:00
( $ tmpnod ) = $ parsedxml - > findnodes ( '/domain/name/text()' ) ;
if ( $ tmpnod ) {
$ tmpnod - > setData ( $ mastername ) ; #name the xml whatever the master name is to be
}
2010-08-30 19:41:37 +00:00
foreach ( $ parsedxml - > findnodes ( "/domain/devices/interface/mac" ) ) { #clear all mac addresses
if ( $ _ - > hasAttribute ( "address" ) ) { $ _ - > setAttribute ( "address" = > '' ) ; }
}
my $ poolobj = get_storage_pool_by_url ( $ directory ) ;
unless ( $ poolobj ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to reach $directory from hypervisor" ] , $ callback , $ node ) ;
return ;
}
#arguments validated, on with our lives
#first order of business: calculate all the image names to be created and ensure none will conflict.
my @ disks = $ parsedxml - > findnodes ( '/domain/devices/disk/source' ) ;
my % volclonemap ;
foreach ( @ disks ) {
my $ filename = $ _ - > getAttribute ( 'file' ) ;
my $ volname = $ filename ;
$ volname =~ s!.*/!! ; #perl is greedy by default
$ volname =~ s/^$node/$mastername/ ;
my $ novol ;
2011-04-15 14:37:57 +00:00
eval { #use two evals, there is a chance the pool has a task blocking refresh like long-running clone.... libvirt should do better IMO, oh well
2010-08-30 19:41:37 +00:00
$ poolobj - > refresh ( ) ;
2011-04-15 14:37:57 +00:00
} ;
eval {
2010-08-30 19:41:37 +00:00
$ novol = $ poolobj - > get_volume_by_name ( $ volname ) ;
} ;
if ( $ novol ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "$volname already exists in target storage pool" ] , $ callback , $ node ) ;
return ;
}
my $ sourcevol ;
eval {
$ sourcevol = $ hypconn - > get_storage_volume_by_path ( $ filename ) ;
} ;
unless ( $ sourcevol ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Unable to access $filename to clone" ] , $ callback , $ node ) ;
return ;
}
$ volclonemap { $ filename } = [ $ sourcevol , $ volname ] ;
2010-09-27 17:56:50 +00:00
$ filename = get_path_for_pool ( $ poolobj ) ;
$ filename =~ s!/\z!! ;
$ filename . = '/' . $ volname ;
2010-08-30 19:41:37 +00:00
$ _ - > setAttribute ( file = > $ filename ) ;
}
foreach ( keys % volclonemap ) {
my $ sourcevol = $ volclonemap { $ _ } - > [ 0 ] ;
my $ targname = $ volclonemap { $ _ } - > [ 1 ] ;
my $ format ;
$ targname =~ /([^\.]*)$/ ;
$ format = $ 1 ;
my $ newvol ;
my % sourceinfo = % { $ sourcevol - > get_info ( ) } ;
my $ targxml = "<volume><name>$targname</name><target><format type='$format'/></target><capacity>" . $ sourceinfo { capacity } . "</capacity></volume>" ;
xCAT::SvrUtils:: sendmsg ( "Cloning " . $ sourcevol - > get_name ( ) . " (currently is " . ( $ sourceinfo { allocation } /1048576)." MB and has a capacity of ".($sourceinfo{capacity}/ 1048576 ) . "MB)" , $ callback , $ node ) ;
eval {
$ newvol = $ poolobj - > clone_volume ( $ targxml , $ sourcevol ) ;
} ;
if ( $ newvol ) {
% sourceinfo = % { $ newvol - > get_info ( ) } ;
xCAT::SvrUtils:: sendmsg ( "Cloning of " . $ sourcevol - > get_name ( ) . " complete (clone uses " . ( $ sourceinfo { allocation } /1048576)." for a disk size of ".($sourceinfo{capacity}/ 1048576 ) . "MB)" , $ callback , $ node ) ;
unless ( $ detach ) {
my $ rebasepath = $ sourcevol - > get_path ( ) ;
my $ rebasename = $ sourcevol - > get_name ( ) ;
my $ rebasepool = get_storage_pool_by_volume ( $ sourcevol ) ;
unless ( $ rebasepool ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Skipping rebase of $rebasename, unable to find correct storage pool" ] , $ callback , $ node ) ;
next ;
}
xCAT::SvrUtils:: sendmsg ( "Rebasing $rebasename from master" , $ callback , $ node ) ;
$ sourcevol - > delete ( ) ;
my $ newbasexml = "<volume><name>$rebasename</name><target><format type='$format'/></target><capacity>" . $ sourceinfo { capacity } . "</capacity><backingStore><path>" . $ newvol - > get_path ( ) . "</path><format type='$format'/></backingStore></volume>" ;
my $ newbasevol ;
eval {
$ newbasevol = $ rebasepool - > create_volume ( $ newbasexml ) ;
} ;
if ( $ newbasevol ) {
xCAT::SvrUtils:: sendmsg ( "Rebased $rebasename from master" , $ callback , $ node ) ;
} else {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Critical failure, rebasing process failed halfway through, source VM trashed" ] , $ callback , $ node ) ;
}
}
} else {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Cloning of " . $ sourcevol - > get_name ( ) . " failed due to " . $@ ] , $ callback , $ node ) ;
return ;
}
}
my $ mastertabentry = { } ;
foreach ( qw/os arch profile/ ) {
if ( defined ( $ confdata - > { nodetype } - > { $ node } - > [ 0 ] - > { $ _ } ) ) {
$ mastertabentry - > { $ _ } = $ confdata - > { nodetype } - > { $ node } - > [ 0 ] - > { $ _ } ;
}
}
foreach ( qw/storagemodel nics/ ) {
if ( defined ( $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { $ _ } ) ) {
$ mastertabentry - > { $ _ } = $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { $ _ } ;
}
}
2010-08-30 21:01:35 +00:00
$ mastertabentry - > { storage } = $ directory ;
2010-08-30 19:41:37 +00:00
$ mastertabentry - > { vintage } = localtime ;
$ mastertabentry - > { originator } = $ requester ;
2010-08-31 20:02:12 +00:00
unless ( $ detach ) {
2010-09-01 12:58:47 +00:00
$ updatetable - > { vm } - > { $ node } - > { master } = $ mastername ;
2010-08-31 20:02:12 +00:00
}
2010-08-30 19:41:37 +00:00
$ updatetable - > { vmmaster } - > { $ mastername } = $ mastertabentry ;
$ updatetable - > { kvm_masterdata } - > { $ mastername } - > { xml } = $ parsedxml - > toString ( ) ;
}
2010-08-27 23:46:06 +00:00
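#clonevm: front end for cloning; -t promotes this VM into a master image, -b deploys this VM from an existing master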
sub clonevm {
shift ; #throw away node
@ ARGV = @ _ ;
my $ target ;
my $ base ;
my $ detach ;
my $ force ;
GetOptions (
'f' = > \ $ force ,
'b=s' = > \ $ base ,
't=s' = > \ $ target ,
'd' = > \ $ detach ,
) ;
if ( $ base and $ target ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "Cannot specify both base (-b) and target (-t)" ] , $ callback , $ node ) ;
return ;
}
if ( $ target ) { #we need to take a single vm and create a master out of it
2010-08-30 19:41:37 +00:00
return promote_vm_to_master ( target = > $ target , force = > $ force , detach = > $ detach ) ;
} elsif ( $ base ) {
2010-08-31 16:11:36 +00:00
return clone_vm_from_master ( base = > $ base , detach = > $ detach ) ;
2010-08-30 19:41:37 +00:00
}
}
2010-08-27 23:46:06 +00:00
2010-08-30 19:41:37 +00:00
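#clone_vm_from_master: create this VM from a stored master image: copy the master XML, fix up name, uuid and
#nics, then either fully copy each master volume (-d) or create new volumes that use the master volumes as backing stores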
sub clone_vm_from_master {
my % args = @ _ ;
my $ base = $ args { base } ;
2010-08-31 16:11:36 +00:00
my $ detach = $ args { detach } ;
2010-08-30 19:41:37 +00:00
my $ vmmastertab = xCAT::Table - > new ( 'vmmaster' , - create = > 0 ) ;
my $ kvmmastertab = xCAT::Table - > new ( 'kvm_masterdata' , - create = > 0 ) ;
unless ( $ vmmastertab and $ kvmmastertab ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "No KVM master images in tables" ] , $ callback , $ node ) ;
return ;
}
my $ mastername = $ base ;
$ mastername =~ s!.*/!! ; #shouldn't be needed, as storage is in there, but just in case
my $ masteref = $ vmmastertab - > getAttribs ( { name = > $ mastername } , [ qw/os arch profile storage storagemodel nics/ ] ) ;
my $ kvmmasteref = $ kvmmastertab - > getAttribs ( { name = > $ mastername } , [ 'xml' ] ) ;
unless ( $ masteref and $ kvmmasteref ) {
xCAT::SvrUtils:: sendmsg ( [ 1 , "KVM master $mastername not found in tables" ] , $ callback , $ node ) ;
return ;
2010-08-27 23:46:06 +00:00
}
2010-08-30 19:41:37 +00:00
my $ newnodexml = $ parser - > parse_string ( $ kvmmasteref - > { xml } ) ;
2010-08-30 21:01:35 +00:00
$ newnodexml - > findnodes ( "/domain/name/text()" ) - > [ 0 ] - > setData ( $ node ) ; #set name correctly
my $ uuid = getNodeUUID ( $ node ) ;
$ newnodexml - > findnodes ( "/domain/uuid/text()" ) - > [ 0 ] - > setData ( $ uuid ) ; #put in correct uuid
#set up mac addresses and such right...
fixup_clone_network ( mastername = > $ mastername , mastertableentry = > $ masteref , kvmmastertableentry = > $ kvmmasteref , xmlinprogress = > $ newnodexml ) ;
#ok, now the fun part, storage...
my $ disk ;
2010-09-27 17:56:50 +00:00
if ( $ masteref - > { storage } ) {
foreach ( split /,/ , $ masteref - > { storage } ) {
s/=.*// ;
get_storage_pool_by_url ( $ _ ) ;
}
}
2010-08-30 21:01:35 +00:00
my $ url ;
if ( $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { storage } ) {
unless ( $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { storage } =~ /^nfs:/ ) {
die "not implemented" ;
}
$ url = $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { storage } ;
} else {
$ url = $ masteref - > { storage } ;
$ updatetable - > { vm } - > { $ node } - > { storage } = $ url ;
}
if ( $ masteref - > { storagemodel } and not $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { storagemodel } ) {
$ updatetable - > { vm } - > { $ node } - > { storagemodel } = $ masteref - > { storagemodel } ;
}
$ url =~ s/,.*// ;
my $ destinationpool = get_storage_pool_by_url ( $ url ) ;
foreach $ disk ( $ newnodexml - > findnodes ( "/domain/devices/disk" ) ) {
2011-05-06 14:22:18 +00:00
my ( $ source ) = ( $ disk - > findnodes ( "./source" ) ) ;
unless ( $ source ) { next ; } #most likely an empty cdrom
my $ srcfilename = $ source - > getAttribute ( "file" ) ;
2010-08-30 21:01:35 +00:00
my $ filename = $ srcfilename ;
$ filename =~ s/^.*$mastername/$node/ ;
$ filename =~ m !\.([^\.]*)\z! ;
my $ format = $ 1 ;
2010-08-31 16:11:36 +00:00
my $ newvol ;
if ( $ detach ) {
2010-09-24 14:24:05 +00:00
my $ sourcevol = $ hypconn - > get_storage_volume_by_path ( $ srcfilename ) ;
2010-08-31 16:11:36 +00:00
my % sourceinfo = % { $ sourcevol - > get_info ( ) } ;
my $ targxml = "<volume><name>$filename</name><target><format type='$format'/></target><capacity>" . $ sourceinfo { capacity } . "</capacity></volume>" ;
xCAT::SvrUtils:: sendmsg ( "Cloning " . $ sourcevol - > get_name ( ) . " (currently is " . ( $ sourceinfo { allocation } /1048576)." MB and has a capacity of ".($sourceinfo{capacity}/ 1048576 ) . "MB)" , $ callback , $ node ) ;
eval {
$ newvol = $ destinationpool - > clone_volume ( $ targxml , $ sourcevol ) ;
} ;
2011-05-31 20:52:03 +00:00
if ( $@ ) {
if ( $@ =~ /already exists/ ) {
return 1 , "Storage creation request conflicts with existing file(s)" ;
} else {
return 1 , "Unknown issue $@" ;
}
}
2010-08-31 16:11:36 +00:00
} else {
2010-09-24 14:24:05 +00:00
my $ sourcevol = $ hypconn - > get_storage_volume_by_path ( $ srcfilename ) ;
my % sourceinfo = % { $ sourcevol - > get_info ( ) } ;
my $ newbasexml = "<volume><name>$filename</name><target><format type='$format'/></target><capacity>" . $ sourceinfo { capacity } . "</capacity><backingStore><path>$srcfilename</path><format type='$format'/></backingStore></volume>" ;
2011-05-31 20:52:03 +00:00
eval {
2010-08-31 16:11:36 +00:00
$ newvol = $ destinationpool - > create_volume ( $ newbasexml ) ;
2011-05-31 20:52:03 +00:00
} ;
if ( $@ ) {
if ( $@ =~ /already in use/ ) {
return 1 , "Storage creation request conflicts with existing file(s)" ;
} else {
return 1 , "Unknown issue $@" ;
}
}
2010-08-31 20:02:12 +00:00
$ updatetable - > { vm } - > { $ node } - > { master } = $ mastername ;
2010-08-31 16:11:36 +00:00
}
2010-08-30 21:01:35 +00:00
my $ newfilename = $ newvol - > get_path ( ) ;
$ disk - > findnodes ( "./source" ) - > [ 0 ] - > setAttribute ( "file" = > $ newfilename ) ;
}
my $ textxml = $ newnodexml - > toString ( ) ;
$ updatetable - > { kvm_nodedata } - > { $ node } - > { xml } = $ textxml ;
2010-08-30 19:41:37 +00:00
2010-08-27 23:46:06 +00:00
}
2010-08-30 19:41:37 +00:00
2010-08-30 21:01:35 +00:00
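#fixup_clone_network: reconcile the interfaces in the master XML with this node's vm.nics setting,
#assigning fresh MAC addresses and bridges, and appending extra interfaces when the node defines more nics than the master had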
sub fixup_clone_network {
my % args = @ _ ;
my $ newnodexml = $ args { xmlinprogress } ;
my $ mastername = $ args { mastername } ;
my $ masteref = $ args { mastertableentry } ;
my $ kvmmasteref = $ args { kvmmastertableentry } ;
unless ( ref ( $ confdata - > { vm } - > { $ node } ) ) {
$ confdata - > { vm } - > { $ node } = [ { nics = > $ masteref - > { nics } } ] ;
$ updatetable - > { vm } - > { $ node } - > { nics } = $ masteref - > { nics } ;
}
unless ( $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { nics } ) { #if no nic configuration yet, take the one stored in the master
$ confdata - > { vm } - > { $ node } - > [ 0 ] - > { nics } = $ masteref - > { nics } ;
$ updatetable - > { vm } - > { $ node } - > { nics } = $ masteref - > { nics } ;
}
my @ nics ;
if ( $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { nics } ) { #could still be empty if it came from master that way
@ nics = split /,/ , $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { nics } ;
} else {
@ nics = ( 'virbr0' ) ;
}
my @ nicsinmaster = $ newnodexml - > findnodes ( "/domain/devices/interface" ) ;
if ( scalar @ nicsinmaster > scalar @ nics ) { #we don't have enough places to attach nics to..
xCAT::SvrUtils:: sendmsg ( [ 1 , "KVM master $mastername has " . scalar @ nicsinmaster . " but this vm only has " . scalar @ nics . " defined" ] , $ callback , $ node ) ;
return ;
}
my $ nicstruct ;
my @ macs = xCAT::VMCommon:: getMacAddresses ( $ confdata , $ node , scalar @ nics ) ;
foreach $ nicstruct ( @ nicsinmaster ) {
my $ bridge = shift @ nics ;
$ bridge =~ s/.*:// ;
$ bridge =~ s/=.*// ;
$ nicstruct - > findnodes ( "./mac" ) - > [ 0 ] - > setAttribute ( "address" = > shift @ macs ) ;
$ nicstruct - > findnodes ( "./source" ) - > [ 0 ] - > setAttribute ( "bridge" = > $ bridge ) ;
}
my $ nic ;
my $ deviceroot = $ newnodexml - > findnodes ( "/domain/devices" ) - > [ 0 ] ;
foreach $ nic ( @ nics ) { #need more xml to throw at it..
my $ type = 'e1000' ; #better default fake nic than rtl8139, relevant to most
$ nic =~ s/.*:// ; #the detail of how the bridge was built is of no
#interest to this segment of code
if ( $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { nicmodel } ) {
$ type = $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { nicmodel } ;
}
if ( $ nic =~ /=/ ) {
( $ nic , $ type ) = split /=/ , $ nic , 2 ;
}
my $ xmlsnippet = "<interface type='bridge'><mac address='" . ( shift @ macs ) . "'/><source bridge='" . $ nic . "'/><model type='$type'/></interface>" ;
my $ chunk = $ parser - > parse_balanced_chunk ( $ xmlsnippet ) ;
$ deviceroot - > appendChild ( $ chunk ) ;
2010-08-30 19:41:37 +00:00
}
}
2009-02-21 12:58:41 +00:00
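#mkvm: define a new guest; optionally creates backing storage from a master image (-m) or of a given size (-s),
#and stores generated libvirt XML in kvm_nodedata if none exists yet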
sub mkvm {
2009-03-17 00:49:55 +00:00
shift ; #Throw away first argument
@ ARGV = @ _ ;
my $ disksize ;
my $ mastername ;
my $ force = 0 ;
require Getopt::Long ;
2010-09-17 14:37:20 +00:00
my $ memory ;
my $ cpucount ;
2009-03-17 00:49:55 +00:00
GetOptions (
'master|m=s' = > \ $ mastername ,
'size|s=s' = > \ $ disksize ,
2010-09-17 14:37:20 +00:00
"mem=s" = > \ $ memory ,
"cpus=s" = > \ $ cpucount ,
2009-03-17 00:49:55 +00:00
'force|f' = > \ $ force
) ;
2009-11-28 00:50:14 +00:00
if ( defined $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { storage } ) {
my $ diskname = $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { storage } ;
2009-03-17 00:49:55 +00:00
if ( $ diskname =~ /^phy:/ ) { #in this case, mkvm should have no arguments
if ( $ mastername or $ disksize ) {
return 1 , "mkvm management of block device storage not implemented" ;
}
2009-10-20 00:44:16 +00:00
}
2011-02-18 20:01:39 +00:00
#print "force=$force\n";
2009-11-22 19:41:52 +00:00
if ( $ mastername or $ disksize ) {
2011-05-31 20:37:54 +00:00
my @ return ;
eval {
@ return = createstorage ( $ diskname , $ mastername , $ disksize , $ confdata - > { vm } - > { $ node } - > [ 0 ] , $ force ) ;
} ;
if ( $@ ) {
if ( $@ =~ /ath already exists/ ) {
return 1 , "Storage creation request conflicts with existing file(s)" ;
} else {
return 1 , "Unknown issue $@" ;
}
}
2010-08-26 14:45:12 +00:00
unless ( $ confdata - > { kvmnodedata } - > { $ node } and $ confdata - > { kvmnodedata } - > { $ node } - > [ 0 ] and $ confdata - > { kvmnodedata } - > { $ node } - > [ 0 ] - > { xml } ) {
my $ xml ;
2010-09-17 14:37:20 +00:00
$ xml = build_xmldesc ( $ node , cpus = > $ cpucount , memory = > $ memory ) ;
2010-08-26 14:45:12 +00:00
$ updatetable - > { kvm_nodedata } - > { $ node } - > { xml } = $ xml ;
}
return @ return ;
2009-03-17 00:49:55 +00:00
}
2010-09-17 14:37:20 +00:00
unless ( $ confdata - > { kvmnodedata } - > { $ node } and $ confdata - > { kvmnodedata } - > { $ node } - > [ 0 ] and $ confdata - > { kvmnodedata } - > { $ node } - > [ 0 ] - > { xml } ) {
my $ xml ;
$ xml = build_xmldesc ( $ node , cpus = > $ cpucount , memory = > $ memory ) ;
$ updatetable - > { kvm_nodedata } - > { $ node } - > { xml } = $ xml ;
}
2009-03-17 00:49:55 +00:00
} else {
if ( $ mastername or $ disksize ) {
return 1 , "Requested initialization of storage, but vm.storage has no value for node" ;
}
}
2009-02-21 12:58:41 +00:00
}
2009-02-15 22:42:22 +00:00
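#power: implement rpower for a guest (on/off/softoff/reset/boot/stat); 'reset' re-applies any pending
#XML reconfiguration by destroying and re-creating the domain, and state changes also refresh the stored XML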
sub power {
2009-03-14 22:17:24 +00:00
@ ARGV = @ _ ;
require Getopt::Long ;
my $ cdloc ;
GetOptions ( 'cdrom|iso|c|i=s' = > \ $ cdloc ) ;
my $ subcommand = shift @ ARGV ;
2009-02-15 22:42:22 +00:00
my $ retstring ;
my $ dom ;
eval {
$ dom = $ hypconn - > get_domain_by_name ( $ node ) ;
} ;
if ( $ subcommand eq "boot" ) {
my $ currstate = getpowstate ( $ dom ) ;
$ retstring = $ currstate . " " ;
if ( $ currstate eq "off" ) {
$ subcommand = "on" ;
} elsif ( $ currstate eq "on" ) {
$ subcommand = "reset" ;
}
}
my $ errstr ;
if ( $ subcommand eq 'on' ) {
unless ( $ dom ) {
2009-10-20 01:02:01 +00:00
if ( $ use_xhrm ) {
2009-11-22 19:41:52 +00:00
if ( xhrm_satisfy ( $ node , $ hyp ) ) {
return ( 1 , "Failure satisfying networking and storage requirements on $hyp for $node" ) ;
}
2009-10-20 01:02:01 +00:00
}
2010-08-27 13:58:40 +00:00
#TODO: storage validation is not necessarily performed here, so it must be done explicitly
#this worked before the offline xml store was introduced, because every 'rpower on' rebuilt the domain definition
2009-03-14 22:17:24 +00:00
( $ dom , $ errstr ) = makedom ( $ node , $ cdloc ) ;
2009-02-15 22:42:22 +00:00
if ( $ errstr ) { return ( 1 , $ errstr ) ; }
} else {
2009-04-13 13:14:46 +00:00
$ retstring . = "$status_noop" ;
2009-02-15 22:42:22 +00:00
}
} elsif ( $ subcommand eq 'off' ) {
if ( $ dom ) {
2010-08-13 12:34:32 +00:00
my $ newxml = $ dom - > get_xml_description ( ) ;
2010-08-26 14:45:12 +00:00
$ updatetable - > { kvm_nodedata } - > { $ node } - > { xml } = $ newxml ;
2009-02-15 22:42:22 +00:00
$ dom - > destroy ( ) ;
2009-04-13 13:16:58 +00:00
undef $ dom ;
2009-04-13 13:14:46 +00:00
} else { $ retstring . = "$status_noop" ; }
2009-02-15 22:42:22 +00:00
} elsif ( $ subcommand eq 'softoff' ) {
if ( $ dom ) {
2010-08-13 12:34:32 +00:00
my $ newxml = $ dom - > get_xml_description ( ) ;
2010-08-26 14:45:12 +00:00
$ updatetable - > { kvm_nodedata } - > { $ node } - > { xml } = $ newxml ;
2009-02-15 22:42:22 +00:00
$ dom - > shutdown ( ) ;
2009-04-13 13:14:46 +00:00
} else { $ retstring . = "$status_noop" ; }
2009-02-15 22:42:22 +00:00
} elsif ( $ subcommand eq 'reset' ) {
if ( $ dom ) {
2010-08-27 15:49:11 +00:00
my $ oldxml = $ dom - > get_xml_description ( ) ;
2011-05-31 20:29:59 +00:00
my $ newxml = reconfigvm ( $ node , $ oldxml ) ;
2010-08-27 15:49:11 +00:00
#This *was* to be clever, but libvirt doesn't even frontend the capability, great...
unless ( $ newxml ) { $ newxml = $ oldxml ; } #TODO: remove this when the 'else' line can be sanely filled out
if ( $ newxml ) { #need to destroy and repower..
$ updatetable - > { kvm_nodedata } - > { $ node } - > { xml } = $ newxml ;
$ dom - > destroy ( ) ;
undef $ dom ;
if ( $ use_xhrm ) {
xhrm_satisfy ( $ node , $ hyp ) ;
}
( $ dom , $ errstr ) = makedom ( $ node , $ cdloc , $ newxml ) ;
if ( $ errstr ) { return ( 1 , $ errstr ) ; }
} else { #no changes, just restart the domain TODO when possible, stupid lack of feature...
2009-10-20 01:02:01 +00:00
}
2009-02-15 22:42:22 +00:00
$ retstring . = "reset" ;
2009-04-13 13:14:46 +00:00
} else { $ retstring . = "$status_noop" ; }
2009-02-15 22:42:22 +00:00
} else {
unless ( $ subcommand =~ /^stat/ ) {
return ( 1 , "Unsupported power directive '$subcommand'" ) ;
}
}
unless ( $ retstring =~ /reset/ ) {
2009-03-14 22:17:24 +00:00
$ retstring = $ retstring . getpowstate ( $ dom ) ;
2009-02-15 22:42:22 +00:00
}
return ( 0 , $ retstring ) ;
}
2010-08-13 13:23:19 +00:00
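#lsvm: list the names of the domains currently active on the hypervisor connection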
sub lsvm {
my $ node = shift ;
my @ doms = $ hypconn - > list_domains ( ) ;
my @ vms ;
foreach ( @ doms ) {
push @ vms , $ _ - > get_name ( ) ;
}
return ( 0 , @ vms ) ;
}
2009-02-15 22:42:22 +00:00
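#guestcmd: dispatch a single node's command to the handler above; returns (rc, output) for forwarding to the client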
sub guestcmd {
$ hyp = shift ;
$ node = shift ;
my $ command = shift ;
my @ args = @ _ ;
my $ error ;
if ( $ command eq "rpower" ) {
return power ( @ args ) ;
2009-02-21 12:58:41 +00:00
} elsif ( $ command eq "mkvm" ) {
2009-03-17 00:49:55 +00:00
return mkvm ( $ node , @ args ) ;
2010-08-28 01:31:51 +00:00
} elsif ( $ command eq "clonevm" ) {
return clonevm ( $ node , @ args ) ;
2010-07-21 15:23:37 +00:00
} elsif ( $ command eq "chvm" ) {
return chvm ( $ node , @ args ) ;
2010-08-26 14:01:54 +00:00
} elsif ( $ command eq "rmvm" ) {
return rmvm ( $ node , @ args ) ;
2010-08-26 18:32:44 +00:00
} elsif ( $ command eq "rinv" ) {
return rinv ( $ node , @ args ) ;
2009-02-15 22:42:22 +00:00
} elsif ( $ command eq "rmigrate" ) {
return migrate ( $ node , @ args ) ;
} elsif ( $ command eq "getrvidparms" ) {
return getrvidparms ( $ node , @ args ) ;
2009-03-13 23:58:29 +00:00
} elsif ( $ command eq "getcons" ) {
return getcons ( $ node , @ args ) ;
2010-08-13 13:23:19 +00:00
} elsif ( $ command eq "lsvm" ) {
return lsvm ( $ node , @ args ) ;
2009-02-15 22:42:22 +00:00
}
= cut
} elsif ( $ command eq "rvitals" ) {
return vitals ( @ args ) ;
} elsif ( $ command =~ /r[ms]preset/ ) {
return resetmp ( @ args ) ;
} elsif ( $ command eq "rspconfig" ) {
return mpaconfig ( $ mpa , $ user , $ pass , $ node , $ slot , @ args ) ;
} elsif ( $ command eq "rbootseq" ) {
return bootseq ( @ args ) ;
} elsif ( $ command eq "switchblade" ) {
return switchblade ( @ args ) ;
} elsif ( $ command eq "getmacs" ) {
return getmacs ( @ args ) ;
} elsif ( $ command eq "rinv" ) {
return inv ( @ args ) ;
} elsif ( $ command eq "reventlog" ) {
return eventlog ( @ args ) ;
} elsif ( $ command eq "rscan" ) {
return rscan ( \ @ args ) ;
}
= cut
return ( 1 , "$command not a supported command by kvm method" ) ;
}
sub preprocess_request {
my $ request = shift ;
2009-07-15 15:10:54 +00:00
if ( $ request - > { _xcatpreprocessed } - > [ 0 ] == 1 ) { return [ $ request ] ; }
2009-02-15 22:42:22 +00:00
my $ callback = shift ;
my @ requests ;
my $ noderange = $ request - > { node } ; #Should be arrayref
my $ command = $ request - > { command } - > [ 0 ] ;
my $ extrargs = $ request - > { arg } ;
my @ exargs = ( $ request - > { arg } ) ;
if ( ref ( $ extrargs ) ) {
@ exargs = @$ extrargs ;
}
my $ usage_string = xCAT::Usage - > parseCommand ( $ command , @ exargs ) ;
if ( $ usage_string ) {
$ callback - > ( { data = > $ usage_string } ) ;
$ request = { } ;
return ;
}
if ( ! $ noderange ) {
$ usage_string = xCAT::Usage - > getUsage ( $ command ) ;
$ callback - > ( { data = > $ usage_string } ) ;
$ request = { } ;
return ;
}
#print "noderange=@$noderange\n";
# find service nodes for requested nodes
# build an individual request for each service node
my $ service = "xcat" ;
my $ sn = xCAT::Utils - > get_ServiceNode ( $ noderange , $ service , "MN" ) ;
# build each request for each service node
foreach my $ snkey ( keys %$ sn )
{
#print "snkey=$snkey\n";
my $ reqcopy = { %$ request } ;
$ reqcopy - > { node } = $ sn - > { $ snkey } ;
$ reqcopy - > { '_xcatdest' } = $ snkey ;
2009-07-15 15:10:54 +00:00
$ reqcopy - > { _xcatpreprocessed } - > [ 0 ] = 1 ;
2009-02-15 22:42:22 +00:00
push @ requests , $ reqcopy ;
}
return \ @ requests ;
}
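#adopt: place orphaned guests (those with no vm.host) onto hypervisors chosen by pick_target, accounting for
#the memory being added to each target, and record the new hosts in the vm table.  Returns 1 only if every orphan was placed.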
sub adopt {
2009-03-18 20:00:51 +00:00
my $ orphash = shift ;
my $ hyphash = shift ;
my % addmemory = ( ) ;
my $ node ;
my $ target ;
2009-11-27 18:55:13 +00:00
my $ vmupdates ;
2009-03-18 20:00:51 +00:00
foreach $ node ( keys % { $ orphash } ) {
$ target = pick_target ( $ node , \ % addmemory ) ;
unless ( $ target ) {
next ;
}
2009-11-28 00:50:14 +00:00
if ( $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { memory } ) {
$ addmemory { $ target } += getUnits ( $ confdata - > { vm } - > { $ node } - > [ 0 ] - > { memory } , "M" , 1024 ) ;
2009-10-20 04:30:08 +00:00
} else {
$ addmemory { $ target } += getUnits ( "512" , "M" , 1024 ) ;
}
2009-03-18 20:00:51 +00:00
$ hyphash { $ target } - > { nodes } - > { $ node } = 1 ;
delete $ orphash - > { $ node } ;
2009-11-27 18:55:13 +00:00
$ vmupdates - > { $ node } - > { host } = $ target ;
2009-03-18 20:00:51 +00:00
}
2011-06-22 15:23:06 +00:00
unless ( $ vmtab ) { $ vmtab = new xCAT:: Table ( 'vm' , - create = > 1 ) ; }
2009-11-27 18:55:13 +00:00
$ vmtab - > setNodesAttribs ( $ vmupdates ) ;
2009-03-18 20:00:51 +00:00
if ( keys % { $ orphash } ) {
return 0 ;
} else {
return 1 ;
}
2009-02-15 22:42:22 +00:00
}
2009-03-18 20:00:51 +00:00
2009-02-15 22:42:22 +00:00
sub process_request {
$ SIG { INT } = $ SIG { TERM } = sub {
foreach ( keys % vm_comm_pids ) {
kill 2 , $ _ ;
}
exit 0 ;
} ;
2010-08-25 21:04:20 +00:00
unless ( $ parser ) {
$ parser = XML::LibXML - > new ( ) ;
}
2009-10-19 22:47:48 +00:00
% offlinehyps = ( ) ;
2009-12-02 00:37:04 +00:00
% hypstats = ( ) ;
2009-10-19 22:47:48 +00:00
% offlinevms = ( ) ;
2009-02-15 22:42:22 +00:00
my $ request = shift ;
2010-08-27 23:46:06 +00:00
if ( $ request - > { _xcat_authname } - > [ 0 ] ) {
$ requester = $ request - > { _xcat_authname } - > [ 0 ] ;
}
2010-08-26 13:16:24 +00:00
$ callback = shift ;
2009-11-28 00:50:14 +00:00
unless ( $ libvirtsupport ) {
$ libvirtsupport = eval {
2009-04-09 14:10:38 +00:00
require Sys::Virt ;
if ( Sys::Virt - > VERSION < "0.2.0" ) {
die ;
}
1 ;
2009-11-28 00:50:14 +00:00
} ;
}
2009-03-14 15:45:07 +00:00
unless ( $ libvirtsupport ) { #Still no Sys::Virt module
2009-04-09 14:10:38 +00:00
$ callback - > ( { error = > "Sys::Virt perl module missing or older than 0.2.0, unable to fulfill KVM plugin requirements" , errorcode = > [ 42 ] } ) ;
2009-03-14 15:45:07 +00:00
return [] ;
}
require Sys::Virt::Domain ;
% runningstates = ( & Sys::Virt::Domain:: STATE_NOSTATE = > 1 , & Sys::Virt::Domain:: STATE_RUNNING = > 1 , & Sys::Virt::Domain:: STATE_BLOCKED = > 1 ) ;
2009-02-15 22:42:22 +00:00
$ doreq = shift ;
my $ level = shift ;
my $ noderange = $ request - > { node } ;
my $ command = $ request - > { command } - > [ 0 ] ;
my @ exargs ;
unless ( $ command ) {
return ; #Empty request
}
if ( ref ( $ request - > { arg } ) ) {
@ exargs = @ { $ request - > { arg } } ;
} else {
@ exargs = ( $ request - > { arg } ) ;
}
2009-10-19 22:47:48 +00:00
my $ forcemode = 0 ;
my % orphans = ( ) ;
2009-10-20 01:45:04 +00:00
if ( $ command eq 'vmstatenotify' ) {
unless ( $ vmtab ) { $ vmtab = new xCAT:: Table ( 'vm' , - create = > 1 ) ; }
my $ state = $ exargs [ 0 ] ;
if ( $ state eq 'vmoff' ) {
$ vmtab - > setNodeAttribs ( $ exargs [ 1 ] , { powerstate = > 'off' } ) ;
return ;
} elsif ( $ state eq 'vmon' ) {
$ vmtab - > setNodeAttribs ( $ exargs [ 1 ] , { powerstate = > 'on' } ) ;
return ;
} elsif ( $ state eq 'hypshutdown' ) { #turn this into an evacuate
2009-11-17 18:41:42 +00:00
my $ nodelisttab = xCAT::Table - > new ( 'nodelist' ) ;
my $ appstatus = $ nodelisttab - > getNodeAttribs ( $ noderange - > [ 0 ] , [ 'appstatus' ] ) ;
my @ apps = split /,/ , $ appstatus - > { 'appstatus' } ;
my @ newapps ;
foreach ( @ apps ) {
if ( $ _ eq 'virtualization' ) { next ; }
push @ newapps , $ _ ;
}
$ nodelisttab - > setNodeAttribs ( $ noderange - > [ 0 ] , { appstatus = > join ( ',' , @ newapps ) } ) ;
2009-10-20 01:45:04 +00:00
$ command = "revacuate" ;
@ exargs = ( ) ;
} elsif ( $ state eq 'hypstartup' ) { #if starting up, check for nodes on this hypervisor and start them up
2009-11-17 18:41:42 +00:00
my $ nodelisttab = xCAT::Table - > new ( 'nodelist' ) ;
my $ appstatus = $ nodelisttab - > getNodeAttribs ( $ noderange - > [ 0 ] , [ 'appstatus' ] ) ;
my @ apps = split /,/ , $ appstatus - > { appstatus } ;
unless ( grep { $ _ eq 'virtualization' } @ apps ) {
push @ apps , 'virtualization' ;
$ nodelisttab - > setNodeAttribs ( $ noderange - > [ 0 ] , { appstatus = > join ( ',' , @ apps ) } ) ;
}
2009-10-20 01:45:04 +00:00
my @ tents = $ vmtab - > getAttribs ( { host = > $ noderange - > [ 0 ] , powerstate = > 'on' } , [ 'node' ] ) ;
$ noderange = [] ;
foreach ( @ tents ) {
push @$ noderange , noderange ( $ _ - > { node } ) ;
}
$ command = "rpower" ;
@ exargs = ( "on" ) ;
}
}
2009-02-15 22:42:22 +00:00
if ( $ command eq 'revacuate' ) {
my $ newnoderange ;
2009-10-19 22:47:48 +00:00
if ( grep { $ _ eq '-f' } @ exargs ) {
$ forcemode = 1 ;
}
2009-02-15 22:42:22 +00:00
foreach ( @$ noderange ) {
2009-10-19 22:47:48 +00:00
my $ hyp = $ _ ; #I used $_ too much here... sorry
2009-02-15 22:42:22 +00:00
$ hypconn = undef ;
push @ destblacklist , $ _ ;
2009-10-20 23:22:04 +00:00
if ( ( not $ offlinehyps { $ _ } ) and nodesockopen ( $ _ , 22 ) ) {
2009-02-15 22:42:22 +00:00
eval { #Contain bugs that won't be in $@
2009-10-20 23:22:04 +00:00
$ hypconn = Sys::Virt - > new ( uri = > "qemu+ssh://root@" . $ _ . "/system?no_tty=1&netcat=nc" ) ;
2009-02-15 22:42:22 +00:00
} ;
2009-10-20 23:22:04 +00:00
unless ( $ hypconn ) { #retry for socat
eval { #Contain bugs that won't be in $@
$ hypconn = Sys::Virt - > new ( uri = > "qemu+ssh://root@" . $ _ . "/system?no_tty=1" ) ;
} ;
}
2009-02-15 22:42:22 +00:00
}
unless ( $ hypconn ) {
2009-10-20 23:22:04 +00:00
$ offlinehyps { $ hyp } = 1 ;
2009-10-19 22:47:48 +00:00
if ( $ forcemode ) { #forcemode indicates the hypervisor is probably already dead, and to clear vm.host of all the nodes, and adopt the ones that are supposed to be 'on', power them on
unless ( $ vmtab ) { $ vmtab = new xCAT:: Table ( 'vm' , - create = > 0 ) ; }
unless ( $ vmtab ) { next ; }
my @ vents = $ vmtab - > getAttribs ( { host = > $ hyp } , [ 'node' , 'powerstate' ] ) ;
my $ vent ;
my $ nodestozap ;
foreach $ vent ( @ vents ) {
my @ nodes = noderange ( $ vent - > { node } ) ;
if ( $ vent - > { powerstate } eq 'on' ) {
foreach ( @ nodes ) {
$ offlinevms { $ _ } = 1 ;
$ orphans { $ _ } = 1 ;
push @$ newnoderange , $ _ ;
}
}
push @$ nodestozap , @ nodes ;
}
$ vmtab - > setNodesAttribs ( $ nodestozap , { host = > '|^.*$||' } ) ;
} else {
$ callback - > ( { node = > [ { name = > [ $ _ ] , error = > [ "Cannot communicate via libvirt to node" ] } ] } ) ;
}
2009-02-15 22:42:22 +00:00
next ;
}
2009-10-19 22:47:48 +00:00
if ( $ hypconn ) {
foreach ( $ hypconn - > list_domains ( ) ) {
my $ guestname = $ _ - > get_name ( ) ;
if ( $ guestname eq 'Domain-0' ) {
next ;
}
push @$ newnoderange , $ guestname ;
2009-02-15 22:42:22 +00:00
}
}
}
$ hypconn = undef ;
$ noderange = $ newnoderange ;
$ command = 'rmigrate' ;
}
2011-06-22 17:29:43 +00:00
if ( $ ::XCATSITEVALS { usexhrm } ) { $ use_xhrm = 1 ; }
2009-11-28 00:50:14 +00:00
$ vmtab = xCAT::Table - > new ( "vm" ) ;
$ confdata = { } ;
2011-06-22 19:07:40 +00:00
unless ( $ command eq 'lsvm' ) {
xCAT::VMCommon:: grab_table_data ( $ noderange , $ confdata , $ callback ) ;
my $ kvmdatatab = xCAT::Table - > new ( "kvm_nodedata" , - create = > 0 ) ; #grab any pertinent pre-existing xml
2010-08-12 18:30:41 +00:00
if ( $ kvmdatatab ) {
$ confdata - > { kvmnodedata } = $ kvmdatatab - > getNodesAttribs ( $ noderange , [ qw/xml/ ] ) ;
} else {
2010-08-12 18:34:06 +00:00
$ confdata - > { kvmnodedata } = { } ;
2010-08-12 18:30:41 +00:00
}
2011-06-22 19:07:40 +00:00
}
2010-09-27 19:08:15 +00:00
if ( $ command eq 'mkvm' or ( $ command eq 'clonevm' and ( grep { "$_" eq '-b' } @ exargs ) ) or ( $ command eq 'rpower' and ( grep { "$_" eq "on" or $ _ eq "boot" or $ _ eq "reset" } @ exargs ) ) ) {
2009-11-28 00:50:14 +00:00
xCAT::VMCommon:: requestMacAddresses ( $ confdata , $ noderange ) ;
2009-11-30 18:37:01 +00:00
my @ dhcpnodes ;
2009-11-30 18:54:39 +00:00
foreach ( keys % { $ confdata - > { dhcpneeded } } ) {
2009-11-30 18:37:01 +00:00
push @ dhcpnodes , $ _ ;
2009-11-30 18:54:39 +00:00
delete $ confdata - > { dhcpneeded } - > { $ _ } ;
2009-11-30 18:37:01 +00:00
}
2010-10-20 18:41:08 +00:00
unless ( $ ::XCATSITEVALS { 'dhcpsetup' } and ( $ ::XCATSITEVALS { 'dhcpsetup' } =~ /^n/i or $ ::XCATSITEVALS { 'dhcpsetup' } =~ /^d/i or $ ::XCATSITEVALS { 'dhcpsetup' } eq '0' ) ) {
$ doreq - > ( { command = > [ 'makedhcp' ] , node = > \ @ dhcpnodes } ) ;
}
2009-11-28 00:50:14 +00:00
}
2009-02-15 22:42:22 +00:00
if ( $ command eq 'revacuate' or $ command eq 'rmigrate' ) {
$ vmmaxp = 1 ; #for now throttle concurrent migrations, requires more sophisticated heuristics to ensure sanity
} else {
my $ tmp ;
2011-06-22 17:29:43 +00:00
if ( $ ::XCATSITEVALS { vmmaxp } ) { $ vmmaxp = $ ::XCATSITEVALS { vmmaxp } ; }
2009-02-15 22:42:22 +00:00
}
my $ children = 0 ;
2009-04-07 18:42:57 +00:00
$ SIG { CHLD } = sub { my $ cpid ; while ( ( $ cpid = waitpid ( - 1 , WNOHANG ) ) > 0 ) { if ( $ vm_comm_pids { $ cpid } ) { delete $ vm_comm_pids { $ cpid } ; $ children - - ; } } } ;
2009-02-15 22:42:22 +00:00
my $ inputs = new IO:: Select ;
my $ sub_fds = new IO:: Select ;
% hyphash = ( ) ;
2010-08-13 13:23:19 +00:00
if ( $ command eq 'lsvm' ) { #command intended for hypervisors, not guests
foreach ( @$ noderange ) { $ hyphash { $ _ } - > { nodes } - > { $ _ } = 1 ; }
} else {
foreach ( keys % { $ confdata - > { vm } } ) {
if ( $ confdata - > { vm } - > { $ _ } - > [ 0 ] - > { host } ) {
$ hyphash { $ confdata - > { vm } - > { $ _ } - > [ 0 ] - > { host } } - > { nodes } - > { $ _ } = 1 ;
} else {
$ orphans { $ _ } = 1 ;
}
2009-02-15 22:42:22 +00:00
}
}
if ( keys % orphans ) {
2009-03-18 20:00:51 +00:00
if ( $ command eq "rpower" ) {
if ( grep /^on$/ , @ exargs or grep /^boot$/ , @ exargs ) {
unless ( adopt ( \ % orphans , \ % hyphash ) ) {
$ callback - > ( { error = > "Can't find " . join ( "," , keys % orphans ) , errorcode = > [ 1 ] } ) ;
return 1 ;
}
} else {
foreach ( keys % orphans ) {
$ callback - > ( { node = > [ { name = > [ $ _ ] , data = > [ { contents = > [ 'off' ] } ] } ] } ) ;
}
2009-02-15 22:42:22 +00:00
}
} elsif ( $ command eq "rmigrate" ) {
2009-10-19 22:47:48 +00:00
if ( $ forcemode ) {
unless ( adopt ( \ % orphans , \ % hyphash ) ) {
$ callback - > ( { error = > "Can't find " . join ( "," , keys % orphans ) , errorcode = > [ 1 ] } ) ;
return 1 ;
}
} else {
$ callback - > ( { error = > "Can't find " . join ( "," , keys % orphans ) , errorcode = > [ 1 ] } ) ;
return ;
}
2010-08-30 21:01:35 +00:00
} elsif ( $ command eq "mkvm" or $ command eq "clonevm" ) { #must adopt to create
2010-07-19 17:50:38 +00:00
unless ( adopt ( \ % orphans , \ % hyphash ) ) {
$ callback - > ( { error = > "Can't find " . join ( "," , keys % orphans ) , errorcode = > [ 1 ] } ) ;
return 1 ;
}
#mkvm used to be able to happen devoid of any hypervisor, make a fake hypervisor entry to allow this to occur
#commenting that out for now
# foreach (keys %orphans) {
# $hyphash{'!@!XCATDUMMYHYPERVISOR!@!'}->{nodes}->{$_}=1;
# }
2009-02-15 22:42:22 +00:00
} else {
$ callback - > ( { error = > "Can't find " . join ( "," , keys % orphans ) , errorcode = > [ 1 ] } ) ;
return ;
}
}
if ( $ command eq "rbeacon" ) {
my % req = ( ) ;
$ req { command } = [ 'rbeacon' ] ;
$ req { arg } = \ @ exargs ;
$ req { node } = [ keys % hyphash ] ;
$ doreq - > ( \ % req , $ callback ) ;
return ;
}
#get new node status
2009-03-25 02:56:58 +00:00
my % oldnodestatus = ( ) ; #saves the old node status
my @ allerrornodes = ( ) ;
2009-02-15 22:42:22 +00:00
my $ check = 0 ;
2009-03-24 03:12:03 +00:00
my $ global_check = 1 ;
2011-06-22 17:29:43 +00:00
if ( $ ::XCATSITEVALS { nodestatus } =~ /0|n|N/ ) { $ global_check = 0 ; }
2009-03-24 03:12:03 +00:00
2009-03-25 02:56:58 +00:00
2009-02-15 22:42:22 +00:00
if ( $ command eq 'rpower' ) {
my $ subcommand = $ exargs [ 0 ] ;
2009-03-24 03:12:03 +00:00
if ( ( $ global_check ) && ( $ subcommand ne 'stat' ) && ( $ subcommand ne 'status' ) ) {
2009-02-15 22:42:22 +00:00
$ check = 1 ;
my @ allnodes = @$ noderange ;
2009-03-25 02:56:58 +00:00
#save the old status
my $ nodelisttab = xCAT::Table - > new ( 'nodelist' ) ;
if ( $ nodelisttab ) {
my $ tabdata = $ nodelisttab - > getNodesAttribs ( \ @ allnodes , [ 'node' , 'status' ] ) ;
foreach my $ node ( @ allnodes )
{
my $ tmp1 = $ tabdata - > { $ node } - > [ 0 ] ;
if ( $ tmp1 ) {
if ( $ tmp1 - > { status } ) { $ oldnodestatus { $ node } = $ tmp1 - > { status } ; }
else { $ oldnodestatus { $ node } = "" ; }
}
}
}
#print "oldstatus:" . Dumper(\%oldnodestatus);
#set the new status to the nodelist.status
my % newnodestatus = ( ) ;
my $ newstat ;
if ( ( $ subcommand eq 'off' ) || ( $ subcommand eq 'softoff' ) ) {
my $ newstat = $ ::STATUS_POWERING_OFF ;
$ newnodestatus { $ newstat } = \ @ allnodes ;
} else {
2009-02-15 22:42:22 +00:00
#get the current nodeset stat
if ( @ allnodes > 0 ) {
my $ nsh = { } ;
2009-03-20 21:36:49 +00:00
my ( $ ret , $ msg ) = xCAT::SvrUtils - > getNodesetStates ( \ @ allnodes , $ nsh ) ;
2009-02-15 22:42:22 +00:00
if ( ! $ ret ) {
foreach ( keys %$ nsh ) {
2009-03-25 02:56:58 +00:00
my $ newstat = xCAT_monitoring::monitorctrl - > getNodeStatusFromNodesetState ( $ _ , "rpower" ) ;
$ newnodestatus { $ newstat } = $ nsh - > { $ _ } ;
2009-02-15 22:42:22 +00:00
}
2009-03-25 02:56:58 +00:00
} else {
$ callback - > ( { data = > $ msg } ) ;
2009-02-15 22:42:22 +00:00
}
}
}
2009-03-25 02:56:58 +00:00
#print "newstatus" . Dumper(\%newnodestatus);
xCAT_monitoring::monitorctrl:: setNodeStatusAttributes ( \ % newnodestatus , 1 ) ;
2009-02-15 22:42:22 +00:00
}
}
    if ($::XCATSITEVALS{masterimgdir}) { $xCAT_plugin::kvm::masterdir = $::XCATSITEVALS{masterimgdir} }
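    #Fan out the work: fork one child per hypervisor (throttled to at most $vmmaxp concurrent children); each
    #child gets one end of a socketpair and streams Storable-frozen responses back to the parent, which relays
    #them via forward_data and, when $check is set, tracks per-node status along the way.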
    foreach $hyp (sort (keys %hyphash)) {
        while ($children > $vmmaxp) {
            my $handlednodes = {};
            forward_data($callback, $sub_fds, $handlednodes);
            #update the node status to the nodelist.status table
            if ($check) {
                updateNodeStatus($handlednodes, \@allerrornodes);
            }
        }
        $children++;
        my $cfd;
        my $pfd;
        socketpair($pfd, $cfd, AF_UNIX, SOCK_STREAM, PF_UNSPEC) or die "socketpair: $!";
        $cfd->autoflush(1);
        $pfd->autoflush(1);
        my $cpid = xCAT::Utils->xfork;
        unless (defined($cpid)) { die "Fork error"; }
        unless ($cpid) {
            close($cfd);
            dohyp($pfd, $hyp, $command, -args => \@exargs);
            exit(0);
        }
        $vm_comm_pids{$cpid} = 1;
        close($pfd);
        $sub_fds->add($cfd);
    }
    while ($sub_fds->count > 0) { # or $children > 0) { #if count is zero, even if we have live children, we can't possibly get data from them
        my $handlednodes = {};
        forward_data($callback, $sub_fds, $handlednodes);
        #update the node status to the nodelist.status table
        if ($check) {
            updateNodeStatus($handlednodes, \@allerrornodes);
        }
    }
#while (wait() > -1) { } #keep around just in case we find the absolute need to wait for children to be gone
2009-02-15 22:42:22 +00:00
#Make sure they get drained, this probably is overkill but shouldn't hurt
#my $rc=1;
#while ( $rc>0 ) {
# my $handlednodes={};
# $rc=forward_data($callback,$sub_fds,$handlednodes);
# #update the node status to the nodelist.status table
# if ($check) {
# updateNodeStatus($handlednodes, \@allerrornodes);
# }
#}
    if ($check) {
        #print "allerrornodes=@allerrornodes\n";
        #revert the status back for the nodes that were no-ops
        my %old = ();
        foreach my $node (@allerrornodes) {
            my $stat = $oldnodestatus{$node};
            if (exists($old{$stat})) {
                my $pa = $old{$stat};
                push(@$pa, $node);
            } else {
                $old{$stat} = [$node];
            }
        }
        xCAT_monitoring::monitorctrl::setNodeStatusAttributes(\%old, 1);
    }
}
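#Collect the nodes that forward_data flagged as error/no-op (-1) so their previous nodelist.status can be restored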
sub updateNodeStatus {
    my $handlednodes  = shift;
    my $allerrornodes = shift;
    foreach my $node (keys(%$handlednodes)) {
        if ($handlednodes->{$node} == -1) { push(@$allerrornodes, $node); }
    }
}
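#Drain one round of responses from the child processes.  Each child writes Storable-frozen response structures
#terminated by the literal marker ENDOFFREEZE6sK4ci; the parent thaws them, passes them to the client callback,
#ACKs the child, and records per-node results in $errornodes (-1 for error/no-op, 1 otherwise).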
sub forward_data {
    my $callback   = shift;
    my $fds        = shift;
    my $errornodes = shift;
    my @ready_fds  = $fds->can_read(1);
    my $rfh;
    my $rc = @ready_fds;
    foreach $rfh (@ready_fds) {
        my $data;
        if ($data = <$rfh>) {
            while ($data !~ /ENDOFFREEZE6sK4ci/) {
                $data .= <$rfh>;
            }
            eval { print $rfh "ACK\n"; };    #ignore failures to send inter-process ack
            my $responses = thaw($data);
            foreach (@$responses) {
                #save the nodes that have errors and the ones that are no-ops for use by the node status monitoring
                my $no_op = 0;
                if ($_->{node}->[0]->{errorcode}) { $no_op = 1; }
                else {
                    my $text = $_->{node}->[0]->{data}->[0]->{contents}->[0];
                    #print "data:$text\n";
                    if (($text) && ($text =~ /$status_noop/)) {
                        $no_op = 1;
                        #remove the symbols that are meant for use by node status
                        $_->{node}->[0]->{data}->[0]->{contents}->[0] =~ s/$status_noop//;
                    }
                }
                #print "data:". $_->{node}->[0]->{data}->[0]->{contents}->[0] . "\n";
                if ($no_op) {
                    if ($errornodes) { $errornodes->{ $_->{node}->[0]->{name}->[0] } = -1; }
                } else {
                    if ($errornodes) { $errornodes->{ $_->{node}->[0]->{name}->[0] } = 1; }
                }
                $callback->($_);
            }
        } else {
            $fds->remove($rfh);
            close($rfh);
        }
    }
    yield();    #Try to avoid useless iterations as much as possible
    return $rc;
}
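#dohyp runs in a forked child: it establishes a libvirt connection to one hypervisor, runs the requested command
#against each of that hypervisor's guests via guestcmd, streams the frozen results back to the parent over $out,
#and finally flushes any table updates queued in $updatetable by the per-node handlers.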
sub dohyp {
    my $out = shift;
    $hyp = shift;
    my $command   = shift;
    my %namedargs = @_;
    my @exargs    = @{ $namedargs{-args} };
    my $node;
    my $args = \@exargs;
    #$vmtab = xCAT::Table->new("vm");
    $vmtab = undef;
    unless ($offlinehyps{$hyp} or ($hyp eq '!@!XCATDUMMYHYPERVISOR!@!') or nodesockopen($hyp, 22)) {
        $offlinehyps{$hyp} = 1;
    }
    eval {    #Contain Sys::Virt bugs that make $@ useless
        if ($hyp eq '!@!XCATDUMMYHYPERVISOR!@!') {    #Fake connection for commands that have a fake hypervisor key
            $hypconn = 1;
        } elsif (not $offlinehyps{$hyp}) {
            $hypconn = Sys::Virt->new(uri => "qemu+ssh://root@" . $hyp . "/system?no_tty=1&netcat=nc");
        }
    };
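    #The URI above asks the remote libvirt transport to use netcat (netcat=nc); if that first attempt failed and
    #the hypervisor is not already marked offline, retry with a plain qemu+ssh URI without the netcat option.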
    unless ($hypconn or $offlinehyps{$hyp}) {
        eval {    #Contain Sys::Virt bugs that make $@ useless
            $hypconn = Sys::Virt->new(uri => "qemu+ssh://root@" . $hyp . "/system?no_tty=1");
        };
    }
    unless ($hypconn) {
        my %err = (node => []);
        foreach (keys %{ $hyphash{$hyp}->{nodes} }) {
            push(@{ $err{node} }, { name => [$_], error => ["Cannot communicate via libvirt to $hyp"], errorcode => [1] });
        }
        print $out freeze([ \%err ]);
        print $out "\nENDOFFREEZE6sK4ci\n";
        yield();
        waitforack($out);
        return 1, "General error establishing libvirt communication";
    }
    foreach $node (sort (keys %{ $hyphash{$hyp}->{nodes} })) {
        my ($rc, @output) = guestcmd($hyp, $node, $command, @$args);
        foreach (@output) {
            my %output;
            if (ref($_)) {
                print $out freeze([$_]);
                print $out "\nENDOFFREEZE6sK4ci\n";
                yield();
                waitforack($out);
                next;
            }
            (my $desc, my $text) = split(/:/, $_, 2);
            unless ($text) {
                $text = $desc;
            } else {
                $desc =~ s/^\s+//;
                $desc =~ s/\s+$//;
                if ($desc) {
                    $output{node}->[0]->{data}->[0]->{desc}->[0] = $desc;
                }
            }
            $text =~ s/^\s+//;
            $text =~ s/\s+$//;
            $output{node}->[0]->{errorcode} = $rc;
            $output{node}->[0]->{name}->[0] = $node;
            if ($rc == 0) {
                $output{node}->[0]->{data}->[0]->{contents}->[0] = $text;
            } else {
                $output{node}->[0]->{error} = $text;
            }
            print $out freeze([ \%output ]);
            print $out "\nENDOFFREEZE6sK4ci\n";
            yield();
            waitforack($out);
        }
        yield();
    }
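    #Flush any deferred table updates queued by the per-node handlers.  The special key '!*XCATNODESTODELETE*!'
    #holds node entries to remove from the table before the remaining attribute updates are written.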
    foreach (keys %$updatetable) {
        my $tabhandle = xCAT::Table->new($_, -create => 1);
        my $updates = $updatetable->{$_};
        if ($updates->{'!*XCATNODESTODELETE*!'}) {
            my @delkeys;
            foreach (keys %{ $updates->{'!*XCATNODESTODELETE*!'} }) {
                if ($_) { push @delkeys, { node => $_ }; }
            }
            if (@delkeys) { $tabhandle->delEntries(\@delkeys); }
            delete $updates->{'!*XCATNODESTODELETE*!'};
        }
        $tabhandle->setNodesAttribs($updatetable->{$_});
    }
#my $msgtoparent=freeze(\@outhashes); # = XMLout(\%output,RootName => 'xcatresponse');
#print $out $msgtoparent; #$node.": $_\n";
}
1;