From 65470149e968b7275c0b75f8d9da94b807759da0 Mon Sep 17 00:00:00 2001 From: Arif Ali Date: Wed, 29 Jan 2014 21:47:09 +0000 Subject: [PATCH 01/17] add per node consoleondemand --- perl-xCAT/xCAT/Schema.pm | 7 ++++++- xCAT-server/lib/xcat/plugins/conserver.pm | 17 +++++++++++++++-- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/perl-xCAT/xCAT/Schema.pm b/perl-xCAT/xCAT/Schema.pm index ae79728ec..932fe20ea 100755 --- a/perl-xCAT/xCAT/Schema.pm +++ b/perl-xCAT/xCAT/Schema.pm @@ -536,7 +536,7 @@ nodegroup => { }, }, nodehm => { - cols => [qw(node power mgt cons termserver termport conserver serialport serialspeed serialflow getmac cmdmapping comments disable)], + cols => [qw(node power mgt cons termserver termport conserver serialport serialspeed serialflow getmac cmdmapping consoleondemand comments disable)], keys => [qw(node)], tablespace =>'XCATTBS16K', table_desc => "Settings that control how each node's hardware is managed. Typically, an additional table that is specific to the hardware type of the node contains additional info. E.g. the ipmi, mp, and ppc tables.", @@ -553,6 +553,7 @@ nodehm => { serialflow => "The flow control value of the serial port for this node. For SOL this is typically 'hard'.", getmac => 'The method to use to get MAC address of the node with the getmac command. If not set, the mgt attribute will be used. Valid values: same as values for mgmt attribute.', cmdmapping => 'The fully qualified name of the file that stores the mapping between PCM hardware management commands and xCAT/third-party hardware management commands for a particular type of hardware device. Only used by PCM.', + consoleondemand => 'This overrides the value from site.consoleondemand; (0=no, 1=yes). 
Default is the result from site.consoleondemand.', comments => 'Any user-written notes.', disable => "Set to 'yes' or '1' to comment out this row.", }, @@ -1895,6 +1896,10 @@ my @nodeattrs = ( {attr_name => 'serialflow', tabentry => 'nodehm.serialflow', access_tabentry => 'nodehm.node=attr:node', + }, + {attr_name => 'consoleondemand', + tabentry => 'nodehm.consoleondemand', + access_tabentry => 'nodehm.node=attr:node', }, ################## # vpd table # diff --git a/xCAT-server/lib/xcat/plugins/conserver.pm b/xCAT-server/lib/xcat/plugins/conserver.pm index da4aeb285..e3fbd4902 100644 --- a/xCAT-server/lib/xcat/plugins/conserver.pm +++ b/xCAT-server/lib/xcat/plugins/conserver.pm @@ -12,6 +12,7 @@ use strict; use Data::Dumper; my @cservers = qw(mrv cyclades); my %termservers; #list of noted termservers +my $siteondemand; # The site value for consoleondemand my $usage_string= " makeconservercf [-d|--delete] noderange @@ -276,6 +277,10 @@ sub docfheaders { my $site_entry = $entries[0]; if ( defined($site_entry) and $site_entry eq "yes" ) { push @newheaders," options ondemand;\n"; + $siteondemand=1; + } + else { + $siteondemand=0; } push @newheaders,"}\n"; @@ -322,7 +327,7 @@ sub makeconservercf { my $hmtab = xCAT::Table->new('nodehm'); my @cfgents1;# = $hmtab->getAllNodeAttribs(['cons','serialport','mgt','conserver','termserver','termport']); if (($nodes and @$nodes > 0) or $req->{noderange}->[0]) { - @cfgents1 = $hmtab->getNodesAttribs($nodes,['node','cons','serialport','mgt','conserver','termserver','termport']); + @cfgents1 = $hmtab->getNodesAttribs($nodes,['node','cons','serialport','mgt','conserver','termserver','termport','consoleondemand']); # Adjust the data structure to make the result consistent with the getAllNodeAttribs() call we make if a noderange was not specified my @tmpcfgents1; foreach my $ent (@cfgents1) @@ -335,7 +340,7 @@ sub makeconservercf { @cfgents1 = @tmpcfgents1 } else { - @cfgents1 = 
$hmtab->getAllNodeAttribs(['cons','serialport','mgt','conserver','termserver','termport']); + @cfgents1 = $hmtab->getAllNodeAttribs(['cons','serialport','mgt','conserver','termserver','termport','consoleondemand']); } @@ -538,6 +543,14 @@ foreach my $node (sort keys %$cfgenthash) { push @$content," exec $locerror".$::XCATROOT."/share/xcat/cons/".$cmeth." ".$node.";\n" } } + if (defined($cfgent->{consoleondemand})) { + if ($cfgent->{consoleondemand} && !$siteondemand ) { + push @$content," options ondemand;\n"; + } + elsif (!$cfgent->{consoleondemand} && $siteondemand ) { + push @$content," options !ondemand;\n"; + } + } push @$content,"}\n"; push @$content,"#xCAT END $node CONS\n"; } From 1b6225b8203691b33d0acdc9a72d004b9e7d275c Mon Sep 17 00:00:00 2001 From: Arif Ali Date: Mon, 17 Feb 2014 12:23:17 +0000 Subject: [PATCH 02/17] change comment on path of where the gpfs_updates directory is placed --- xCAT-IBMhpc/share/xcat/IBMhpc/gpfs/gpfs_updates | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xCAT-IBMhpc/share/xcat/IBMhpc/gpfs/gpfs_updates b/xCAT-IBMhpc/share/xcat/IBMhpc/gpfs/gpfs_updates index 1476c2216..2155b8ab4 100755 --- a/xCAT-IBMhpc/share/xcat/IBMhpc/gpfs/gpfs_updates +++ b/xCAT-IBMhpc/share/xcat/IBMhpc/gpfs/gpfs_updates @@ -14,7 +14,7 @@ # postscript (stateful install) or with the otherpkgs processing of # genimage (stateless/statelite install). This script will install any # gpfs update rpms that exist on the xCAT management node in the -# /install/post/gpfs_updates directory. +# /install/post/otherpkgs/gpfs_updates directory. 
# This is necessary because the GPFS updates can ONLY be installed # after the base rpms have been installed, and the update rpms cannot # exist in any rpm repositories used by xCAT otherpkgs processing From d0e85aeee2176a094d47d2028ea259adc6d64f93 Mon Sep 17 00:00:00 2001 From: lissav Date: Tue, 18 Feb 2014 06:21:39 -0500 Subject: [PATCH 03/17] fix man page --- xCAT-client/pods/man1/geninitrd.1.pod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xCAT-client/pods/man1/geninitrd.1.pod b/xCAT-client/pods/man1/geninitrd.1.pod index ef328b917..9af58234a 100644 --- a/xCAT-client/pods/man1/geninitrd.1.pod +++ b/xCAT-client/pods/man1/geninitrd.1.pod @@ -1,6 +1,6 @@ =head1 NAME -B - Generate an initrd (initial ramfs) which to be used for statefull install or stateless netboot. +B - Generate an initrd (initial ramfs) which to be used for statefull install or stateless netboot. =head1 SYNOPSIS From 22b529f8e3b6a917004e4786a6f9d426a2354279 Mon Sep 17 00:00:00 2001 From: lissav Date: Tue, 18 Feb 2014 08:52:24 -0500 Subject: [PATCH 04/17] more zone support --- xCAT-server/lib/xcat/plugins/zone.pm | 38 +++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/xCAT-server/lib/xcat/plugins/zone.pm b/xCAT-server/lib/xcat/plugins/zone.pm index 17974ef9a..19efa20c7 100644 --- a/xCAT-server/lib/xcat/plugins/zone.pm +++ b/xCAT-server/lib/xcat/plugins/zone.pm @@ -77,6 +77,13 @@ sub process_request return 1; } + # you may not run on AIX + if (xCAT::Utils->isAIX()) { + my $rsp = {}; + $rsp->{error}->[0] = "The $command may only be run on a Linux Cluster."; + xCAT::MsgUtils->message("E", $rsp, $callback, 1); + return 1; + } # test to see if any parms if (scalar($request->{arg} == 0)) { my $rsp = {}; @@ -135,7 +142,36 @@ sub process_request } # save input noderange if ($options{'noderange'}) { + + # check to see if Management Node is in the noderange, if so error $request->{noderange}->[0] = $options{'noderange'}; + my @nodes = 
xCAT::NodeRange::noderange($request->{noderange}->[0]); + my @mname = xCAT::Utils->noderangecontainsMn(@nodes); + if (@mname) + { # MN in the nodelist + my $nodes=join(',', @mname); + my $rsp = {}; + $rsp->{error}->[0] = + "You must not run $command and include the management node: $nodes."; + xCAT::MsgUtils->message("E", $rsp, $callback, 1); + exit 1; + } + # now check for service nodes in noderange. It they exist that is an error also. + my @SN; + my @CN; + xCAT::ServiceNodeUtils->getSNandCPnodes(\@nodes, \@SN, \@CN); + if (scalar(@SN)) + { # SN in the nodelist + my $nodes=join(',', @SN); + my $rsp = {}; + $rsp->{error}->[0] = + "You must not run $command and include any service nodes: $nodes."; + xCAT::MsgUtils->message("E", $rsp, $callback, 1); + exit 1; + } + # now check for service nodes in noderange. It they exist that is an error also. + + } if ($options{'verbose'}) { @@ -393,7 +429,7 @@ sub updatezonetable my $zonename=$request->{zonename}; if ( $$options{'defaultzone'}) { # set the default # check to see if a default already defined - my $curdefaultzone = xCAT::Zone->getdefaultzone; + my $curdefaultzone = xCAT::Zone->getdefaultzone($callback); if (!(defined ($curdefaultzone))) { # no default defined $tb_cols{defaultzone} ="yes"; } else { # already a default From e16e4d327b431effb3ae69436f9b5f992815f942 Mon Sep 17 00:00:00 2001 From: lissav Date: Tue, 18 Feb 2014 09:15:41 -0500 Subject: [PATCH 05/17] more zone support --- perl-xCAT/xCAT/Zone.pm | 112 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 105 insertions(+), 7 deletions(-) diff --git a/perl-xCAT/xCAT/Zone.pm b/perl-xCAT/xCAT/Zone.pm index 93961ff92..bf62b7635 100644 --- a/perl-xCAT/xCAT/Zone.pm +++ b/perl-xCAT/xCAT/Zone.pm @@ -190,17 +190,27 @@ sub genSSHRootKeys #-------------------------------------------------------------------------------- sub getdefaultzone { + my ($class, $callback) = @_; my $defaultzone; # read all the zone table and find the defaultzone, if it exists my $tab = 
xCAT::Table->new("zone"); - my @zones = $tab->getAllAttribs('zonename','defaultzone'); - foreach my $zone (@zones) { - # Look for the defaultzone=yes/1 entry - if ((defined($zone->{defaultzone})) && ($zone->{defaultzone} =~ "yes")) { - $defaultzone = $zone->{zonename}; - } + if ($tab){ + my @zones = $tab->getAllAttribs('zonename','defaultzone'); + foreach my $zone (@zones) { + # Look for the defaultzone=yes/1 entry + if ((defined($zone->{defaultzone})) && + (($zone->{defaultzone} =~ "yes") || ($zone->{defaultzone} = "1"))) { + $defaultzone = $zone->{zonename}; + } + $tab->close(); + } + } else { + my $rsp = {}; + $rsp->{error}->[0] = + "Error reading the zone table. "; + xCAT::MsgUtils->message("E", $rsp, $callback); + } - $tab->close(); return $defaultzone; } #-------------------------------------------------------------------------------- @@ -228,4 +238,92 @@ sub iszonedefined return 0; } } +#-------------------------------------------------------------------------------- + +=head3 getzoneinfo + Arguments: + An array of nodes + Returns: + Hash array by zonename point to the nodes in that zonename and sshkeydir + zonename1 -> {nodelist} -> array of nodes in the zone + -> {sshkeydir} -> directory containing ssh RSA keys + -> {defaultzone} -> is it the default zone + Example: + my %zonehash =xCAT::Zone->getNodeZones($nodelist); + Rules: + If the nodes nodelist.zonename attribute is a zonename, it is assigned to that zone + If the nodes nodelist.zonename attribute is undefined: + If there is a defaultzone in the zone table, the node is assigned to that zone + If there is no defaultzone in the zone table, the node is assigned to the ~.ssh keydir +=cut + +#-------------------------------------------------------------------------------- +sub getzoneinfo +{ + my ($class, $callback,$nodes) = @_; + + # make the list into an array +# $nodelist=~ s/\s*//g; # remove blanks +# my @nodes = split ',', $nodelist; + my $zonehash; + my $defaultzone; + # read all the zone table + my 
$zonetab = xCAT::Table->new("zone"); + if ($zonetab){ + my @zones = $zonetab->getAllAttribs('zonename','sshkeydir','defaultzone'); + $zonetab->close(); + if (@zones) { + foreach my $zone (@zones) { + my $zonename=$zone->{zonename}; + $zonehash->{$zonename}->{sshkeydir}= $zone->{sshkeydir}; + $zonehash->{$zonename}->{defaultzone}= $zone->{defaultzone}; + # find the defaultzone + if ((defined($zone->{defaultzone})) && + (($zone->{defaultzone} =~ "yes") || ($zone->{defaultzone} = "1"))) { + $defaultzone = $zone->{zonename}; + } + } + } + } else { + my $rsp = {}; + $rsp->{error}->[0] = + "Error reading the zone table. "; + xCAT::MsgUtils->message("E", $rsp, $callback); + return; + + } + my $nodelisttab = xCAT::Table->new("nodelist"); + my $nodehash = $nodelisttab->getNodesAttribs(\@$nodes, ['zonename']); + # for each of the nodes, look up it's zone name and assign to the zonehash + # if the node is a service node, it is assigned to the __xcatzone which gets its keys from + # the ~/.ssh dir no matter what in the database for the zonename. 
+ # If the nodes nodelist.zonename attribute is a zonename, it is assigned to that zone + # If the nodes nodelist.zonename attribute is undefined: + # If there is a defaultzone in the zone table, the node is assigned to that zone + # If there is no defaultzone in the zone table, the node is assigned to the ~.ssh keydir + + + my @allSN=xCAT::ServiceNodeUtils->getAllSN("ALL"); # read all the servicenodes define + my $xcatzone = "__xcatzone"; # if node is in no zones or a service node, use this one + $zonehash->{$xcatzone}->{sshkeydir}= "~/.ssh"; + foreach my $node (@$nodes) { + my $zonename; + if (grep(/^$node$/, @allSN)) { # this is a servicenode, treat special + $zonename=$xcatzone; # always use ~/.ssh directory + } else { # use the nodelist.zonename attribute + $zonename=$nodehash->{$node}->[0]->{zonename}; + } + if (defined($zonename)) { # zonename explicitly defined in nodelist.zonename + push @{$zonehash->{$zonename}->{nodes}},$node; + } else { # no explict zonename + if (defined ($defaultzone)) { # there is a default zone in the zone table, use it + push @{$zonehash->{$defaultzone}->{nodes}},$node; + } else { # if no default then use the ~/.ssh keys as the default, put them in the __xcatzone + push @{$zonehash->{$xcatzone}->{nodes}},$node; + + } + } + } + return; +} 1; From 8ced46f7ccf340d36d04c00009b72ee86d821dba Mon Sep 17 00:00:00 2001 From: lissav Date: Tue, 18 Feb 2014 10:16:24 -0500 Subject: [PATCH 06/17] more improvements for zones --- perl-xCAT/xCAT/Zone.pm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/perl-xCAT/xCAT/Zone.pm b/perl-xCAT/xCAT/Zone.pm index bf62b7635..b4cea8bea 100644 --- a/perl-xCAT/xCAT/Zone.pm +++ b/perl-xCAT/xCAT/Zone.pm @@ -279,7 +279,7 @@ sub getzoneinfo $zonehash->{$zonename}->{defaultzone}= $zone->{defaultzone}; # find the defaultzone if ((defined($zone->{defaultzone})) && - (($zone->{defaultzone} =~ "yes") || ($zone->{defaultzone} = "1"))) { + (($zone->{defaultzone} =~ /yes/i) or ($zone->{defaultzone} eq 
"1"))) { $defaultzone = $zone->{zonename}; } } @@ -324,6 +324,6 @@ sub getzoneinfo } } } - return; + return $zonehash; } 1; From d45ba76f8e3964b025a19daa71e2993e52f110f0 Mon Sep 17 00:00:00 2001 From: lissav Date: Wed, 19 Feb 2014 05:24:13 -0500 Subject: [PATCH 07/17] add missing -g flag --- xCAT-client/pods/man1/updatenode.1.pod | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/xCAT-client/pods/man1/updatenode.1.pod b/xCAT-client/pods/man1/updatenode.1.pod index 24ad11b11..441870d53 100644 --- a/xCAT-client/pods/man1/updatenode.1.pod +++ b/xCAT-client/pods/man1/updatenode.1.pod @@ -8,6 +8,8 @@ B B [B<-V>|B<--verbose>] [B<-F>|B<--sync>] [B<-f>|B<--sns B B [B<-k>|B<--security>] [B<-t timeout>] +B B [B<-g>|B<--genmypost>] + B B [B<-V>|B<--verbose>] [B<-t timeout>] [B] B B [B<-V>|B<--verbose>] [B<-f>|B<--snsync>] @@ -327,6 +329,11 @@ For statelite installations to sync files, you should use the read-only option for files/directories listed in litefile table with source location specified in the litetree table. +=item B<-g|--genmypost + +Will generate a new mypostscript file for the +nodes in the noderange, if site precreatemypostscripts is 1 or YES. + =item B<-h|--help> From cdf72640fd5003dbaa687dd784eab97b62109b58 Mon Sep 17 00:00:00 2001 From: lissav Date: Wed, 19 Feb 2014 07:13:24 -0500 Subject: [PATCH 08/17] add zone table sshbetweennodes attribute --- perl-xCAT/xCAT/Schema.pm | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/perl-xCAT/xCAT/Schema.pm b/perl-xCAT/xCAT/Schema.pm index 02f220aff..dc8f3ab6c 100755 --- a/perl-xCAT/xCAT/Schema.pm +++ b/perl-xCAT/xCAT/Schema.pm @@ -1057,7 +1057,8 @@ site => { " Set to NOGROUPS,if you do not wish to enabled any group of compute nodes.\n". " Service Nodes are not affected by this attribute\n". " they are always setup with\n". - " passwordless root access to nodes and other SN.\n\n". + " passwordless root access to nodes and other SN.\n". 
+ " If using the zone table, this attribute in not used.\n\n". " -----------------\n". "SERVICES ATTRIBUTES\n". " -----------------\n". @@ -1196,12 +1197,13 @@ performance => { }, }, zone => { - cols => [qw(zonename sshkeydir defaultzone comments disable)], + cols => [qw(zonename sshkeydir sshbetweennodes defaultzone comments disable)], keys => [qw(zonename)], table_desc => 'Defines a cluster zone for nodes that share root ssh key access to each other.', descriptions => { zonename => 'The name of the zone.', sshkeydir => 'Directory containing the shared root ssh RSA keys.', + sshbetweennodes => 'Indicates whether passwordless ssh will be setup between the nodes of this zone. Values are yes/1 or no/0. Default is yes. ', defaultzone => 'If nodes are not assigned to any other zone, they will default to this zone. If value is set to yes or 1.', comments => 'Any user-provided notes.', disable => "Set to 'yes' or '1' to comment out this row.", @@ -3101,6 +3103,10 @@ push(@{$defspec{node}->{'attrs'}}, @nodeattrs); tabentry => 'zone.sshkeydir', access_tabentry => 'zone.zonename=attr:zonename', }, + {attr_name => 'sshbetweennodes', + tabentry => 'zone.sshbetweennodes', + access_tabentry => 'zone.zonename=attr:zonename', + }, {attr_name => 'defaultzone', tabentry => 'zone.defaultzone', access_tabentry => 'zone.zonename=attr:zonename', From 1a59531d6c62f99f61872cdaf1539d36591e27db Mon Sep 17 00:00:00 2001 From: lissav Date: Wed, 19 Feb 2014 08:48:43 -0500 Subject: [PATCH 09/17] designchanges --- xCAT-server/lib/xcat/plugins/zone.pm | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/xCAT-server/lib/xcat/plugins/zone.pm b/xCAT-server/lib/xcat/plugins/zone.pm index 19efa20c7..91398d3a8 100644 --- a/xCAT-server/lib/xcat/plugins/zone.pm +++ b/xCAT-server/lib/xcat/plugins/zone.pm @@ -108,6 +108,7 @@ sub process_request 'h|help' => \$options{'help'}, 'k|sshkeypath=s' => \$options{'sshkeypath'}, 'K|genkeys' => \$options{'gensshkeys'}, + 
's|sshbetweennodes=s' => \$options{'sshbetweennodes'}, 'v|version' => \$options{'version'}, 'V|Verbose' => \$options{'verbose'}, ) @@ -244,6 +245,7 @@ sub mkzone # Create path to generated ssh keys $keydir .= $request->{zonename}; + # update the zone table $rc=updatezonetable($request, $callback,$options,$keydir); @@ -337,11 +339,11 @@ sub usage my $usagemsg2=""; if ($command eq "mkzone") { $usagemsg1 = " mkzone -h \n mkzone -v \n"; - $usagemsg2 = " mkzone [-V] [--defaultzone] [-k ] [-g] [-f]"; + $usagemsg2 = " mkzone [-V] [--defaultzone] [-k ] [-g] [-f] [-s ]"; } else { if ($command eq "chzone") { $usagemsg1 = " chzone -h \n chzone -v \n"; - $usagemsg2 = " chzone [-V] [--defaultzone] [-k ] [-r ] [-g] "; + $usagemsg2 = " chzone [-V] [--defaultzone] [-k ] [-r ] [-g] [-s ]"; } else { if ($command eq "rmzone") { $usagemsg1 = " rmzone -h \n rmzone -v \n"; @@ -424,6 +426,18 @@ sub updatezonetable my $tab = xCAT::Table->new("zone"); if ($tab) { + # read a record from the zone table, if it is empty then add + # the xcatdefault entry + my @zones = $tab->getAllAttribs('zonename'); + if (!(@zones)) { # table empty + my %xcatdefaultzone; + $xcatdefaultzone{defaultzone} ="yes"; + $xcatdefaultzone{sshbetweennodes} ="yes"; + $xcatdefaultzone{sshkeydir} ="~/.ssh"; + $tab->setAttribs({zonename => "xcatdefault"}, \%xcatdefaultzone); + } + + # now add the users zone my %tb_cols; $tb_cols{sshkeydir} = $keydir; my $zonename=$request->{zonename}; From 9db5ceef6abb3a0fac93e62e2858e6f306b536d7 Mon Sep 17 00:00:00 2001 From: lissav Date: Wed, 19 Feb 2014 08:50:18 -0500 Subject: [PATCH 10/17] designchanges --- perl-xCAT/xCAT/Zone.pm | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/perl-xCAT/xCAT/Zone.pm b/perl-xCAT/xCAT/Zone.pm index b4cea8bea..6c5c06eb6 100644 --- a/perl-xCAT/xCAT/Zone.pm +++ b/perl-xCAT/xCAT/Zone.pm @@ -199,7 +199,7 @@ sub getdefaultzone foreach my $zone (@zones) { # Look for the defaultzone=yes/1 entry if 
((defined($zone->{defaultzone})) && - (($zone->{defaultzone} =~ "yes") || ($zone->{defaultzone} = "1"))) { + (($zone->{defaultzone} =~ "yes") || ($zone->{defaultzone} eq "1"))) { $defaultzone = $zone->{zonename}; } $tab->close(); @@ -255,22 +255,23 @@ sub iszonedefined If the nodes nodelist.zonename attribute is undefined: If there is a defaultzone in the zone table, the node is assigned to that zone If there is no defaultzone in the zone table, the node is assigned to the ~.ssh keydir + $::GETZONEINFO_RC + 0 = good return + 1 = error occured =cut #-------------------------------------------------------------------------------- sub getzoneinfo { my ($class, $callback,$nodes) = @_; - - # make the list into an array -# $nodelist=~ s/\s*//g; # remove blanks -# my @nodes = split ',', $nodelist; + $::GETZONEINFO_RC=0; my $zonehash; my $defaultzone; # read all the zone table my $zonetab = xCAT::Table->new("zone"); + my @zones; if ($zonetab){ - my @zones = $zonetab->getAllAttribs('zonename','sshkeydir','defaultzone'); + @zones = $zonetab->getAllAttribs('zonename','sshkeydir','sshbetweennodes','defaultzone'); $zonetab->close(); if (@zones) { foreach my $zone (@zones) { @@ -279,7 +280,7 @@ sub getzoneinfo $zonehash->{$zonename}->{defaultzone}= $zone->{defaultzone}; # find the defaultzone if ((defined($zone->{defaultzone})) && - (($zone->{defaultzone} =~ /yes/i) or ($zone->{defaultzone} eq "1"))) { + (($zone->{defaultzone} =~ "yes") || ($zone->{defaultzone} eq "1"))) { $defaultzone = $zone->{zonename}; } } @@ -289,6 +290,7 @@ sub getzoneinfo $rsp->{error}->[0] = "Error reading the zone table. 
"; xCAT::MsgUtils->message("E", $rsp, $callback); + $::GETZONEINFO_RC =1; return; } @@ -314,6 +316,15 @@ sub getzoneinfo $zonename=$nodehash->{$node}->[0]->{zonename}; } if (defined($zonename)) { # zonename explicitly defined in nodelist.zonename + # check to see if defined in the zone table + if (!(grep(/^$zonename$/, @zones))) { + my $rsp = {}; + $rsp->{error}->[0] = + "$node has a zonenane: $zonename that is not define in the zone table. Remove the zonename from the node, or create the zone using mkzone."; + xCAT::MsgUtils->message("E", $rsp, $callback); + $::GETZONEINFO_RC =1; + return; + } push @{$zonehash->{$zonename}->{nodes}},$node; } else { # no explict zonename if (defined ($defaultzone)) { # there is a default zone in the zone table, use it @@ -324,6 +335,6 @@ sub getzoneinfo } } } - return $zonehash; + return; } 1; From 2598e30ee1b0dfe5fa1d5ede88e22e25b1b529ea Mon Sep 17 00:00:00 2001 From: lissav Date: Wed, 19 Feb 2014 12:52:11 -0500 Subject: [PATCH 11/17] more zone code --- perl-xCAT/xCAT/RemoteShellExp.pm | 5 +- perl-xCAT/xCAT/TableUtils.pm | 112 ++++++++++++++++++++++++++++--- perl-xCAT/xCAT/Zone.pm | 2 +- 3 files changed, 105 insertions(+), 14 deletions(-) diff --git a/perl-xCAT/xCAT/RemoteShellExp.pm b/perl-xCAT/xCAT/RemoteShellExp.pm index 4d9b3d34d..925465dc0 100755 --- a/perl-xCAT/xCAT/RemoteShellExp.pm +++ b/perl-xCAT/xCAT/RemoteShellExp.pm @@ -495,6 +495,7 @@ sub sendnodeskeys # in $HOME/.ssh/tmp/authorized_keys # copy to the node to the temp directory # scp $HOME/.ssh/tmp/authorized_keys to_userid@:/tmp/$to_userid/.ssh + # scp $HOME/.ssh/id_rsa.pub to_userid@:/tmp/$to_userid/.ssh # If you are going to enable ssh to ssh between nodes, then # scp $HOME/.ssh/id_rsa to that temp directory on the node # copy the script $HOME/.ssh/copy.sh to the node, it will do the @@ -607,11 +608,11 @@ sub sendnodeskeys my $spawncopyfiles; if ($ENV{'DSH_ENABLE_SSH'}) { # we will enable node to node ssh $spawncopyfiles= - "$remotecopy $home/.ssh/id_rsa 
$home/.ssh/copy.sh $home/.ssh/tmp/authorized_keys $to_userid\@$node:/tmp/$to_userid/.ssh "; + "$remotecopy $home/.ssh/id_rsa $home/.ssh/id_rsa.pub $home/.ssh/copy.sh $home/.ssh/tmp/authorized_keys $to_userid\@$node:/tmp/$to_userid/.ssh "; } else { # no node to node ssh ( don't send private key) $spawncopyfiles= - "$remotecopy $home/.ssh/copy.sh $home/.ssh/tmp/authorized_keys $to_userid\@$node:/tmp/$to_userid/.ssh "; + "$remotecopy $home/.ssh/id_rsa.pub $home/.ssh/copy.sh $home/.ssh/tmp/authorized_keys $to_userid\@$node:/tmp/$to_userid/.ssh "; } # send copy command unless ($sendkeys->spawn($spawncopyfiles)) diff --git a/perl-xCAT/xCAT/TableUtils.pm b/perl-xCAT/xCAT/TableUtils.pm index 9e3b452ad..b44cb6f27 100644 --- a/perl-xCAT/xCAT/TableUtils.pm +++ b/perl-xCAT/xCAT/TableUtils.pm @@ -19,6 +19,7 @@ if ($^O =~ /^aix/i) { use lib "$::XCATROOT/lib/perl"; use strict; require xCAT::Table; +require xCAT::Zone; use File::Path; #----------------------------------------------------------------------- @@ -271,7 +272,7 @@ sub bldnonrootSSHFiles Error: 0=good, 1=error Example: - xCAT::TableUtils->setupSSH(@target_nodes); + xCAT::TableUtils->setupSSH(@target_nodes,$expecttimeout); Comments: Does not setup known_hosts. 
Assumes automatically setup by SSH ( ssh config option StrictHostKeyChecking no should @@ -335,21 +336,21 @@ sub setupSSH $::REMOTE_SHELL = "/usr/bin/ssh"; my $rsp = {}; + # Get the home directory my $home = xCAT::Utils->getHomeDir($from_userid); $ENV{'DSH_FROM_USERID_HOME'} = $home; - if ($from_userid eq "root") { - # make the directory to hold keys to transfer to the nodes if (!-d $SSHdir) { mkpath("$SSHdir", { mode => 0755 }); } - # generates new keys for root, if they do not already exist + # generates new keys for root, if they do not already exist ~/.ssh + # nodes not used on this option but in there to preserve the interface my $rc= xCAT::RemoteShellExp->remoteshellexp("k",$::CALLBACK,$::REMOTE_SHELL,$n_str,$expecttimeout); @@ -374,7 +375,9 @@ else fi mkdir -p \$dest_dir cat /tmp/$to_userid/.ssh/authorized_keys >> \$home/.ssh/authorized_keys 2>&1 +cat /tmp/$to_userid/.ssh/id_rsa.pub >> \$home/.ssh/authorized_keys 2>&1 cp /tmp/$to_userid/.ssh/id_rsa \$home/.ssh/id_rsa 2>&1 +cp /tmp/$to_userid/.ssh/id_rsa.pub \$home/.ssh/id_rsa.pub 2>&1 chmod 0600 \$home/.ssh/id_* 2>&1 rm -f /tmp/$to_userid/.ssh/* 2>&1 rmdir \"/tmp/$to_userid/.ssh\" @@ -386,6 +389,7 @@ rmdir \"/tmp/$to_userid\" \n"; my $auth_key2=0; if ($from_userid eq "root") { + # this will put the root/.ssh/id_rsa.pub key in the authorized keys file to put on the node my $rc = xCAT::TableUtils->cpSSHFiles($SSHdir); if ($rc != 0) { # error @@ -418,13 +422,43 @@ rmdir \"/tmp/$to_userid\" \n"; xCAT::TableUtils->bldnonrootSSHFiles($from_userid); } - # send the keys to the nodes for root or some other id - # - # This environment variable determines whether to setup - # node to node ssh - # The nodes must be checked against the site.sshbetweennodes attribute # For root user and not to devices only to nodes if (($from_userid eq "root") && (!($ENV{'DEVICETYPE'}))) { + # Need to check if nodes are in a zone. 
+ # If in a zone, then root ssh keys for the node will be taken from the zones ssh keys not ~/.ssh + # zones are only supported on nodes that are not a service node. + # Also for the call to RemoteShellExp, we must group the nodes that are in the same zone + + my $tab = xCAT::Table->new("zone"); + if ($tab) + { + # if we have zones, need to send the zone keys to each node in the zone + my @zones = $tab->getAllAttribs('zonename'); + $tab->close(); + if (@zones) { # we have zones defined + my $rc = xCAT::TableUtils->sendkeystozones($ref_nodes,$expecttimeout); + if ($rc != 0) + { + $rsp->{data}->[0] = "Error sending ssh keys to the zones.\n"; + xCAT::MsgUtils->message("E", $rsp, $::CALLBACK); + return 1; + + } + return 0; + } + } else { + $rsp->{data}->[0] = "Could not open zone table.\n"; + xCAT::MsgUtils->message("E", $rsp, $::CALLBACK); + return 1; + } + + + # if no zone table defined, do it the old way + # send the keys to the nodes for root or some other id + # + # The nodes must be checked against the site.sshbetweennodes attribute + # This site attribute determines whether to setup + # node to node ssh my $enablenodes; my $disablenodes; my @nodelist= split(",", $n_str); @@ -440,10 +474,10 @@ rmdir \"/tmp/$to_userid\" \n"; } } - my $cmd; if ($enablenodes) { # node on list to setup nodetonodessh chop $enablenodes; # remove last comma $ENV{'DSH_ENABLE_SSH'} = "YES"; + # send the keys to the nodes my $rc=xCAT::RemoteShellExp->remoteshellexp("s",$::CALLBACK,"/usr/bin/ssh",$enablenodes,$expecttimeout); if ($rc != 0) { @@ -452,8 +486,9 @@ rmdir \"/tmp/$to_userid\" \n"; } } - if ($disablenodes) { # node on list to setup nodetonodessh + if ($disablenodes) { # node on list to disable nodetonodessh chop $disablenodes; # remove last comma + # send the keys to the nodes my $rc=xCAT::RemoteShellExp->remoteshellexp("s",$::CALLBACK,"/usr/bin/ssh",$disablenodes,$expecttimeout); if ($rc != 0) { @@ -462,6 +497,7 @@ rmdir \"/tmp/$to_userid\" \n"; } } + } else { # from user is not 
root or it is a device , always send private key $ENV{'DSH_ENABLE_SSH'} = "YES"; my $rc=xCAT::RemoteShellExp->remoteshellexp("s",$::CALLBACK,"/usr/bin/ssh",$n_str,$expecttimeout); @@ -503,6 +539,60 @@ rmdir \"/tmp/$to_userid\" \n"; #-------------------------------------------------------------------------------- +=head3 sendkeystozones + + Transfers the ssh keys + for the root id on the nodes using the zone table. + + + Arguments: + Array of nodes + Timeout for expect call (optional) + Returns: + + Env Variables: $DSH_FROM_USERID, $DSH_TO_USERID, $DSH_REMOTE_PASSWORD + the ssh keys are transferred from the $DSH_FROM_USERID to the $DSH_TO_USERID + on the node(s). The DSH_REMOTE_PASSWORD and the DSH_FROM_USERID + must be obtained by + the calling script or from the xdsh client + + Globals: + $::XCATROOT , $::CALLBACK + Error: + 0=good, 1=error + Example: + xCAT::TableUtils->setupSSH(@target_nodes,$expecttimeout); + Comments: + Does not setup known_hosts. Assumes automatically + setup by SSH ( ssh config option StrictHostKeyChecking no should + be set in the ssh config file). 
+ +=cut + +#-------------------------------------------------------------------------------- +sub sendkeystozones +{ + my ($class, $ref_nodes,$expecttimeout) = @_; + my @nodes=$ref_nodes; + my %zonehash =xCAT::Zone->getNodeZones(@nodes); + # for each zone in the zonehash + # if sshbetweennodes is yes + # $ENV{'DSH_ENABLE_SSH'} = "YES"; + # else + # unset $ENV{'DSH_ENABLE_SSH'} + # send the keys to the nodes + # my $rc=xCAT::RemoteShellExp->remoteshellexp("s",$::CALLBACK,"/usr/bin/ssh",$nodelist,$expecttimeout); + # if ($rc != 0) + # { + # $rsp->{data}->[0] = "remoteshellexp failed sending keys to $zonename."; + # xCAT::MsgUtils->message("E", $rsp, $::CALLBACK); + # + # } + # } # endforeach + return 0; +} +#-------------------------------------------------------------------------------- + =head3 cpSSHFiles Builds authorized_keyfiles for root diff --git a/perl-xCAT/xCAT/Zone.pm b/perl-xCAT/xCAT/Zone.pm index 6c5c06eb6..e974a366d 100644 --- a/perl-xCAT/xCAT/Zone.pm +++ b/perl-xCAT/xCAT/Zone.pm @@ -249,7 +249,7 @@ sub iszonedefined -> {sshkeydir} -> directory containing ssh RSA keys -> {defaultzone} -> is it the default zone Example: - my %zonehash =xCAT::Zone->getNodeZones($nodelist); + my %zonehash =xCAT::Zone->getNodeZones(@nodearray); Rules: If the nodes nodelist.zonename attribute is a zonename, it is assigned to that zone If the nodes nodelist.zonename attribute is undefined: From 542259583c57ed93de00590255eeaa4f32055162 Mon Sep 17 00:00:00 2001 From: linggao Date: Thu, 20 Feb 2014 09:45:15 -0500 Subject: [PATCH 12/17] xCAT baremetal driver for OpenStack --- .../lib/perl/xCAT_plugin/openstack.pm | 778 ++++++++++++++++++ .../lib/python/xcat/__init__.py | 0 .../lib/python/xcat/openstack/__init__.py | 0 .../xcat/openstack/baremetal/__init__.py | 17 + .../python/xcat/openstack/baremetal/driver.py | 255 ++++++ .../xcat/openstack/baremetal/exception.py | 41 + .../xcat/openstack/baremetal/power_states.py | 9 + .../xcat/openstack/baremetal/xcat_driver.py | 257 ++++++ 
.../pods/man1/opsaddbmnode.1.pod | 75 ++ .../pods/man1/opsaddimage.1.pod | 65 ++ .../openstack/postscripts/config_ops_bm_node | 187 +++++ .../postscripts/deconfig_ops_bm_node | 65 ++ .../xCAT-OpenStack-baremetal.spec | 103 +++ xCAT-OpenStack-baremetal/xpod2man | 213 +++++ 14 files changed, 2065 insertions(+) create mode 100644 xCAT-OpenStack-baremetal/lib/perl/xCAT_plugin/openstack.pm create mode 100644 xCAT-OpenStack-baremetal/lib/python/xcat/__init__.py create mode 100644 xCAT-OpenStack-baremetal/lib/python/xcat/openstack/__init__.py create mode 100644 xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/__init__.py create mode 100644 xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/driver.py create mode 100644 xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/exception.py create mode 100644 xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/power_states.py create mode 100644 xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/xcat_driver.py create mode 100644 xCAT-OpenStack-baremetal/pods/man1/opsaddbmnode.1.pod create mode 100644 xCAT-OpenStack-baremetal/pods/man1/opsaddimage.1.pod create mode 100755 xCAT-OpenStack-baremetal/share/xcat/openstack/postscripts/config_ops_bm_node create mode 100755 xCAT-OpenStack-baremetal/share/xcat/openstack/postscripts/deconfig_ops_bm_node create mode 100644 xCAT-OpenStack-baremetal/xCAT-OpenStack-baremetal.spec create mode 100755 xCAT-OpenStack-baremetal/xpod2man diff --git a/xCAT-OpenStack-baremetal/lib/perl/xCAT_plugin/openstack.pm b/xCAT-OpenStack-baremetal/lib/perl/xCAT_plugin/openstack.pm new file mode 100644 index 000000000..6e591945a --- /dev/null +++ b/xCAT-OpenStack-baremetal/lib/perl/xCAT_plugin/openstack.pm @@ -0,0 +1,778 @@ +# IBM(c) 2013 EPL license http://www.eclipse.org/legal/epl-v10.html +package xCAT_plugin::openstack; +BEGIN +{ + $::XCATROOT = $ENV{'XCATROOT'} ? 
$ENV{'XCATROOT'} : '/opt/xcat'; +} +use lib "$::XCATROOT/lib/perl"; +use xCAT::Utils; +use xCAT::TableUtils; +use xCAT::SvrUtils; +use xCAT::NetworkUtils; +use xCAT::Table; +use Data::Dumper; +use File::Path; +use File::Copy; +use Getopt::Long; +Getopt::Long::Configure("bundling"); +Getopt::Long::Configure("pass_through"); + + +sub handled_commands { + return { + opsaddbmnode => "openstack", #external command + opsaddimage => "openstack", #external command + deploy_ops_bm_node => "openstack", #internal command called from the baremetal driver + cleanup_ops_bm_node => "openstack", #internal command called from the baremetal driver + } +} + +sub process_request { + my $request = shift; + my $callback = shift; + my $doreq = shift; + my $command = $request->{command}->[0]; + + if ($command eq "opsaddbmnode") { + return opsaddbmnode($request, $callback, $doreq); + } elsif ($command eq "opsaddimage") { + return opsaddimage($request, $callback, $doreq); + } elsif ($command eq "deploy_ops_bm_node") { + return deploy_ops_bm_node($request, $callback, $doreq); + } elsif ($command eq "cleanup_ops_bm_node") { + return cleanup_ops_bm_node($request, $callback, $doreq); + } else { + $callback->({error=>["Unsupported command: $command."],errorcode=>[1]}); + return 1; + } +} + + +#------------------------------------------------------------------------------- + +=head3 opsaddbmnode + This function takes the xCAT nodes and register them + as the OpenStack baremetal nodes +=cut + +#------------------------------------------------------------------------------- +sub opsaddbmnode { + my $request = shift; + my $callback = shift; + my $doreq = shift; + + @ARGV = @{$request->{arg}}; + Getopt::Long::Configure("bundling"); + Getopt::Long::Configure("no_pass_through"); + + my $help; + my $version; + my $host; + + if(!GetOptions( + 'h|help' => \$help, + 'v|version' => \$version, + 's=s' => \$host, + )) + { + &opsaddbmnode_usage($callback); + return 1; + } + # display the usage if -h or --help 
is specified + if ($help) { + &opsaddbmnode_usage($callback); + return 0; + } + # display the version statement if -v or --verison is specified + if ($version) + { + my $rsp={}; + $rsp->{data}->[0]= xCAT::Utils->Version(); + $callback->($rsp); + return 0; + } + + if (!$request->{node}) { + $callback->({error=>["Please specify at least one node."],errorcode=>[1]}); + return 1; + } + if (!$host) { + $callback->({error=>["Please specify the OpenStack compute host name with -s flag."],errorcode=>[1]}); + return 1; + } + + my $nodes = $request->{node}; + + #get bmc info for the nodes + my $ipmitab = xCAT::Table->new("ipmi", -create => 0); + my $tmp_ipmi; + if ($ipmitab) { + $tmp_ipmi = $ipmitab->getNodesAttribs($nodes, ['bmc','username', 'password'], prefetchcache=>1); + #print Dumper($tmp_ipmi); + } else { + $callback->({error=>["Cannot open the ipmi table."],errorcode=>[1]}); + return 1; + } + #get mac for the nodes + my $mactab = xCAT::Table->new("mac", -create => 0); + my $tmp_mac; + if ($mactab) { + $tmp_mac = $mactab->getNodesAttribs($nodes, ['mac'], prefetchcache=>1); + #print Dumper($tmp_mac); + } else { + $callback->({error=>["Cannot open the mac table."],errorcode=>[1]}); + return 1; + } + + #get cpu, memory and disk info for the nodes + my $hwinvtab = xCAT::Table->new("hwinv", -create => 0); + my $tmp_hwinv; + if ($hwinvtab) { + $tmp_hwinv = $hwinvtab->getNodesAttribs($nodes, ['cpucount', 'memory', 'disksize'], prefetchcache=>1); + #print Dumper($tmp_hwinv); + } else { + $callback->({error=>["Cannot open the hwinv table."],errorcode=>[1]}); + return 1; + } + + foreach my $node (@$nodes) { + #collect the node infomation needed for each node, some info + #may not be defined in the xCAT db + my ($bmc, $bmc_user, $bmc_password, $mac, $cpu, $memory, $disk); + my $ref_ipmi = $tmp_ipmi->{$node}->[0]; + if ($ref_ipmi) { + if (exists($ref_ipmi->{bmc})) { + $bmc = $ref_ipmi->{bmc}; + } + if (exists($ref_ipmi->{username})) { + $bmc_user = $ref_ipmi->{username}; + } + if 
(exists($ref_ipmi->{password})) { + $bmc_password = $ref_ipmi->{password}; + } + } + + $ref_mac = $tmp_mac->{$node}->[0]; + if ($ref_mac) { + if (exists($ref_mac->{mac})) { + $mac = $ref_mac->{mac}; + } + } + + $ref_hwinv = $tmp_hwinv->{$node}->[0]; + if ($ref_hwinv) { + if (exists($ref_hwinv->{cpucount})) { + $cpu = $ref_hwinv->{cpucount}; + } + if (exists($ref_hwinv->{memory})) { + $memory = $ref_hwinv->{memory}; + #TODO: what if the unit is not in MB? We need to convert it to MB + $memory =~ s/MB|mb//g; + } + if (exists($ref_hwinv->{disksize})) { + #The format of the the disk size is: sda:250GB,sdb:250GB or just 250GB + #We need to get the size of the first one + $disk = $ref_hwinv->{disksize}; + my @a = split(',', $disk); + my @b = split(':', $a[0]); + if (@b > 1) { + $disk = $b[1]; + } else { + $disk = $b[0]; + } + print "a=@a, b=@b\n"; + #TODO: what if the unit is not in GB? We need to convert it to MB + $disk =~ s/GB|gb//g; + } + } + + #some info are mendatory + if (!$mac) { + $callback->({error=>["Mac address is not defined in the mac table for node $node."],errorcode=>[1]}); + next; + } + if (!$cpu) { + #default cpu count is 1 + $cpu = 1; + } + if (!$memory) { + #default memory size is 1024MB=1GB + $memory = 1024; + } + if (!$disk) { + #default disk size is 1GB + $disk = 1; + } + + #print "$bmc, $bmc_user, $bmc_password, $mac, $cpu, $memory, $disk\n"; + + #call OpenStack command to add the node into the OpenStack as + #a baremetal node. 
+ my $cmd_tmp = "nova baremetal-node-create"; + if ($bmc) { + #make sure it is an ip address + if (($bmc !~ /\d+\.\d+\.\d+\.\d+/) && ($bmc !~ /:/)) { + $bmc = xCAT::NetworkUtils->getipaddr($bmc); + } + $cmd_tmp .= " --pm_address=$bmc"; + } + if ($bmc_user) { + $cmd_tmp .= " --pm_user=$bmc_user"; + } + if ($bmc_password) { + $cmd_tmp .= " --pm_password=$bmc_password"; + } + $cmd_tmp .= " $host $cpu $memory $disk $mac"; + + my $cmd = qq~source \~/openrc;$cmd_tmp~; + #print "cmd=$cmd\n"; + my $output = + xCAT::InstUtils->xcmd($callback, $doreq, "xdsh", [$host], $cmd, 0); + if ($::RUNCMD_RC != 0) { + my $rsp; + push @{$rsp->{data}}, "OpenStack creating baremetal node $node:"; + push @{$rsp->{data}}, "$output"; + xCAT::MsgUtils->message("E", $rsp, $callback); + } + } +} + + +#------------------------------------------------------------------------------- + +=head3 opsaddimage + This function takes the xCAT nodes and register them + as the OpenStack baremetal nodes +=cut + +#------------------------------------------------------------------------------- +sub opsaddimage { + my $request = shift; + my $callback = shift; + my $doreq = shift; + + @ARGV = @{$request->{arg}}; + Getopt::Long::Configure("bundling"); + Getopt::Long::Configure("no_pass_through"); + + my $help; + my $version; + #my $cloud; + my $ops_img_names; + my $controller; + + if(!GetOptions( + 'h|help' => \$help, + 'v|version' => \$version, + 'c=s' => \$controller, + 'n=s' => \$ops_img_names, + )) + { + &opsaddimage_usage($callback); + return 1; + } + # display the usage if -h or --help is specified + if ($help) { + &opsaddimage_usage($callback); + return 0; + } + # display the version statement if -v or --verison is specified + if ($version) + { + my $rsp={}; + $rsp->{data}->[0]= xCAT::Utils->Version(); + $callback->($rsp); + return 0; + } + + if (@ARGV ==0) { + $callback->({error=>["Please specify an image name or a list of image names."],errorcode=>[1]}); + return 1; + } + + #make sure the input cloud name 
is valid. + #if (!$cloud) { + # $callback->({error=>["Please specify the name of the cloud with -c flag."],errorcode=>[1]}); + # return 1; + #} else { + # my $cloudstab = xCAT::Table->new('clouds', -create => 0); + # my @et = $cloudstab->getAllAttribs('name', 'controller'); + # if(@et) { + # foreach my $tmp_et (@et) { + # if ($tmp_et->{name} eq $cloud) { + # if ($tmp_et->{controller}) { + # $controller = $tmp_et->{controller}; + # last; + # } else { + # $callback->({error=>["Please specify the controller in the clouds table for the cloud: $cloud."],errorcode=>[1]}); + # return 1; + # } + # } + # } + # } + + if (!$controller) { + $callback->({error=>["Please specify the OpenStack controller node name with -c."],errorcode=>[1]}); + return 1; + } + #} + + #make sure that the images from the command are valid image names + @images = split(',', $ARGV[0]); + @new_names = (); + if ($ops_img_names) { + @new_names = split(',', $ops_img_names); + } + #print "images=@images, new image names=@new_names, controller=$controller\n"; + + my $image_hash = {}; + my $osimgtab = xCAT::Table->new('osimage', -create => 0); + my @et = $osimgtab->getAllAttribs('imagename'); + if(@et) { + foreach my $tmp_et (@et) { + $image_hash->{$tmp_et->{imagename}}{'xCAT'} = 1; + } + } + my @bad_images; + foreach my $image (@images) { + if (!exists($image_hash->{$image})) { + push @bad_images, $image; + } + } + if (@bad_images > 0) { + $callback->({error=>["The following images cannot be found in xCAT osimage table:\n " . join("\n ", @bad_images) . 
"\n"],errorcode=>[1]}); + return 1; + } + + my $index=0; + foreach my $image (@images) { + my $new_name = shift(@new_names); + if (!$new_name) { + $new_name = $image; #the default new name is xCAT image name + } + my $cmd_tmp = "glance image-create --name $new_name --public --disk-format qcow2 --container-format bare --property xcat_image_name=\'$image\' < /tmp/$image.qcow2"; + + my $cmd = qq~touch /tmp/$image.qcow2;source \~/openrc;$cmd_tmp;rm /tmp/$image.qcow2~; + #print "cmd=$cmd\ncontroller=$controller\n"; + my $output = + xCAT::InstUtils->xcmd($callback, $doreq, "xdsh", [$controller], $cmd, 0); + if ($::RUNCMD_RC != 0) { + my $rsp; + push @{$rsp->{data}}, "OpenStack creating image $new_name:"; + push @{$rsp->{data}}, "$output"; + xCAT::MsgUtils->message("E", $rsp, $callback); + } + } +} + +#------------------------------------------------------------------------------- + +=head3 deploy_ops_bm_node + This is a internel command called by OpenStack xCAT-baremetal driver. + It prepares the node by adding the config_ops_bm_node postbootscript + to the postscript table for the node, then call nodeset and then boot + the node up. 
+=cut + +#------------------------------------------------------------------------------- +sub deploy_ops_bm_node { + my $request = shift; + my $callback = shift; + my $doreq = shift; + + @ARGV = @{$request->{arg}}; + Getopt::Long::Configure("bundling"); + Getopt::Long::Configure("no_pass_through"); + + my $node = $request->{node}->[0]; + + my $help; + my $version; + my $img_name; + my $hostname; + my $fixed_ip; + my $netmask; + + if(!GetOptions( + 'h|help' => \$help, + 'v|version' => \$version, + 'image=s' => \$img_name, + 'host=s' => \$hostname, + 'ip=s' => \$fixed_ip, + 'mask=s' => \$netmask, + )) + { + &deploy_ops_bm_node_usage($callback); + return 1; + } + # display the usage if -h or --help is specified + if ($help) { + &deploy_ops_bm_node_usage($callback); + return 0; + } + # display the version statement if -v or --verison is specified + if ($version) + { + my $rsp={}; + $rsp->{data}->[0]= xCAT::Utils->Version(); + $callback->($rsp); + return 0; + } + print "node=$node, image=$img_name, host=$hostname, ip=$fixed_ip, mask=$netmask\n"; + + #validate the image name + my $osimagetab = xCAT::Table->new('osimage', -create=>1); + my $ref = $osimagetab->getAttribs({imagename => $img_name}, 'imagename'); + if (!$ref) { + $callback->({error=>["Invalid image name: $img_name."],errorcode=>[1]}); + return 1; + } + + #check if the fixed ip is within the xCAT management network. + #get the master ip address for the node then check if the master ip and + #the OpenStack fixed_ip are on the same subnet. 
+ #my $same_nw = 0; + #my $master = xCAT::TableUtils->GetMasterNodeName($node); + #my $master_ip = xCAT::NetworkUtils->toIP($master); + #if (xCAT::NetworkUtils::isInSameSubnet($master_ip, $fixed_ip, $netmask, 0)) { + # $same_nw = 1; + #} + + + #add config_ops_bm_node to the node's postbootscript + my $script = "config_ops_bm_node $hostname $fixed_ip $netmask"; + add_postscript($callback, $node, $script); + + #run nodeset + my $cmd = qq~osimage=$img_name~; + my $output = xCAT::Utils->runxcmd( + {command => ["nodeset"], + node => [$node], + arg => [$cmd]}, + $doreq, -1, 1); + if ($::RUNCMD_RC != 0) { + my $rsp; + push @{$rsp->{data}}, "nodeset:"; + push @{$rsp->{data}}, "$output"; + xCAT::MsgUtils->message("E", $rsp, $callback); + return 1; + } + + #set boot order, assuming it is ipmi nodes for now + # TODO: add support for system power hw. + my $cmd = qq~net~; + my $output = xCAT::Utils->runxcmd( + {command => ["rsetboot"], + node => [$node], + arg => [$cmd]}, + $doreq, -1, 1); + if ($::RUNCMD_RC != 0) { + my $rsp; + push @{$rsp->{data}}, "rsetboot:"; + push @{$rsp->{data}}, "$output"; + xCAT::MsgUtils->message("E", $rsp, $callback); + } + + #reboot the node + my $cmd = qq~boot~; + my $output = xCAT::Utils->runxcmd( + {command => ["rpower"], + node => [$node], + arg => [$cmd]}, + $doreq, -1, 1); + if ($::RUNCMD_RC != 0) { + my $rsp; + push @{$rsp->{data}}, "rpower:"; + push @{$rsp->{data}}, "$output"; + xCAT::MsgUtils->message("E", $rsp, $callback); + return 1; + } +} + + +#------------------------------------------------------------------------------- + +=head3 cleanup_ops_bm_node + This is a internel command called by OpenStack xCAT-baremetal driver. + It undoes all the changes made by deploy_ops_bm_node command. It removes + the config_ops_bmn_ode postbootscript from the postscript table for the + node, removes the alias ip and then power off the node. 
+=cut + +#------------------------------------------------------------------------------- +sub cleanup_ops_bm_node { + my $request = shift; + my $callback = shift; + my $doreq = shift; + + @ARGV = @{$request->{arg}}; + Getopt::Long::Configure("bundling"); + Getopt::Long::Configure("no_pass_through"); + + my $node = $request->{node}->[0]; + + my $help; + my $version; + my $fixed_ip; + + if(!GetOptions( + 'h|help' => \$help, + 'v|version' => \$version, + 'ip=s' => \$fixed_ip, + )) + { + &cleanup_ops_bm_node_usage($callback); + return 1; + } + # display the usage if -h or --help is specified + if ($help) { + &cleanup_ops_bm_node_usage($callback); + return 0; + } + # display the version statement if -v or --verison is specified + if ($version) + { + my $rsp={}; + $rsp->{data}->[0]= xCAT::Utils->Version(); + $callback->($rsp); + return 0; + } + #print "node=$node, ip=$fixed_ip\n"; + + #removes the config_ops_bm_node postbootscript from the postscripts table + remove_postscript($callback, $node, "config_ops_bm_node"); + + + #run updatenode to remove the ip alias + my $cmd = qq~-P deconfig_ops_bm_node $fixed_ip~; + my $output = xCAT::Utils->runxcmd( + {command => ["updatenode"], + node => [$node], + arg => [$cmd]}, + $doreq, -1, 1); + if ($::RUNCMD_RC != 0) { + my $rsp; + push @{$rsp->{data}}, "updatenode:"; + push @{$rsp->{data}}, "$output"; + xCAT::MsgUtils->message("E", $rsp, $callback); + } + + #turn the node power off + $ssh_ok = 0; + my $cmd = qq~stat~; + my $output = xCAT::Utils->runxcmd( + {command => ["rpower"], + node => [$node], + arg => [$cmd]}, + $doreq, -1, 1); + + if ($::RUNCMD_RC != 0) { + my $rsp; + push @{$rsp->{data}}, "rpower:"; + push @{$rsp->{data}}, "$output"; + xCAT::MsgUtils->message("E", $rsp, $callback); + return 1; + } else { + if ($output !~ /: off/) { + #power off the node + my $cmd = qq~off~; + my $output = xCAT::Utils->runxcmd( + {command => ["rpower"], + node => [$node], + arg => [$cmd]}, + $doreq, -1, 1); + if ($::RUNCMD_RC != 0) { + my 
$rsp; + push @{$rsp->{data}}, "rpower:"; + push @{$rsp->{data}}, "$output"; + xCAT::MsgUtils->message("E", $rsp, $callback); + return 1; + } + } + } +} + +#------------------------------------------------------- +=head3 add_postscript + + It adds the 'config_ops_bm_node' postbootscript to the + postscript table for the given node. + +=cut +#------------------------------------------------------- +sub add_postscript { + my $callback=shift; + my $node=shift; + my $script=shift; + print "script=$script\n"; + + my $posttab=xCAT::Table->new("postscripts", -create =>1); + my %setup_hash; + my $ref = $posttab->getNodeAttribs($node,[qw(postscripts postbootscripts)]); + my $found=0; + if ($ref) { + if (exists($ref->{postscripts})) { + my @a = split(/,/, $ref->{postscripts}); + if (grep(/^config_ops_bm_node/, @a)) { + $found = 1; + if (!grep(/^$script$/, @a)) { + #not exact match, must replace it with the new script + for (@a) { + s/^config_ops_bm_node.*$/$script/; + } + my $new_post = join(',', @a); + $setup_hash{$node}={postscripts=>"$new_post"}; + } + } + } + + + if (exists($ref->{postbootscripts})) { + my $post=$ref->{postbootscripts}; + my @old_a=split(',', $post); + if (grep(/^config_ops_bm_node/, @old_a)) { + if (!grep(/^$script$/, @old_a)) { + #not exact match, will replace it with new script + for (@old_a) { + s/^config_ops_bm_node.*$/$script/; + } + my $new_postboot = join(',', @old_a); + $setup_hash{$node}={postbootscripts=>"$new_postboot"}; + } + } else { + if (! $found) { + $setup_hash{$node}={postbootscripts=>"$post,$script"}; + } + } + } else { + if (! 
$found) { + $setup_hash{$node}={postbootscripts=>"$script"}; + } + } + } else { + $setup_hash{$node}={postbootscripts=>"$script"}; + } + + if (keys(%setup_hash) > 0) { + $posttab->setNodesAttribs(\%setup_hash); + } + + return 0; +} + +#------------------------------------------------------- +=head3 remove_postscript + + It removes the 'config_ops_bm_node' postbootscript from + the postscript table for the given node. + +=cut +#------------------------------------------------------- +sub remove_postscript { + my $callback=shift; + my $node=shift; + my $script=shift; + + my $posttab=xCAT::Table->new("postscripts", -create =>1); + my %setup_hash; + my $ref = $posttab->getNodeAttribs($node,[qw(postscripts postbootscripts)]); + my $found=0; + if ($ref) { + if (exists($ref->{postscripts})) { + my @old_a = split(/,/, $ref->{postscripts}); + my @new_a = grep(!/^$script/, @old_a); + if (scalar(@new_a) != scalar(@old_a)) { + my $new_post = join(',', @new_a); + $setup_hash{$node}={postscripts=>"$new_post"}; + } + } + + if (exists($ref->{postbootscripts})) { + my @old_b = split(/,/, $ref->{postbootscripts}); + my @new_b = grep(!/^$script/, @old_b); + if (scalar(@new_b) != scalar(@old_b)) { + my $new_post = join(',', @new_b); + $setup_hash{$node}={postbootscripts=>"$new_post"}; + } + } + + } + + if (keys(%setup_hash) > 0) { + $posttab->setNodesAttribs(\%setup_hash); + } + + return 0; +} + + +#------------------------------------------------------------------------------- + +=head3 opsaddbmnode_usage + The usage text for opsaddbmnode command. 
+=cut + +#------------------------------------------------------------------------------- +sub opsaddbmnode_usage { + my $cb=shift; + my $rsp={}; + + $rsp->{data}->[0]= "Usage: opsaddbmnode -h"; + $rsp->{data}->[1]= " opsaddbmnode -v"; + $rsp->{data}->[2]= " opsaddbmnode -s "; + $cb->($rsp); +} + + +#------------------------------------------------------------------------------- + +=head3 opsaddimage_usage + The usage text for opsaddimage command. +=cut + +#------------------------------------------------------------------------------- +sub opsaddimage_usage { + my $cb=shift; + my $rsp={}; + + $rsp->{data}->[0]= "Usage: opsaddimage -h"; + $rsp->{data}->[1]= " opsaddimage -v"; + $rsp->{data}->[2]= " opsaddimage [-n -c "; + $cb->($rsp); +} + +#------------------------------------------------------------------------------- + +=head3 deploy_ops_bm_node_usage + The usage text for deploy_ops_bm_node command. +=cut + +#------------------------------------------------------------------------------- +sub deploy_ops_bm_node_usage { + my $cb=shift; + my $rsp={}; + + $rsp->{data}->[0]= "Usage: deploy_ops_bm_node -h"; + $rsp->{data}->[1]= " deploy_ops_bm_node -v"; + $rsp->{data}->[2]= " deploy_ops_bm_node --image --host --ip --mask "; + $cb->($rsp); +} + +#------------------------------------------------------------------------------- + +=head3 cleanup_ops_bm_node_usage + The usage text cleanup_ops_bm_node command. 
+=cut + +#------------------------------------------------------------------------------- +sub cleanup_ops_bm_node_usage { + my $cb=shift; + my $rsp={}; + + $rsp->{data}->[0]= "Usage: cleanup_ops_bm_node -h"; + $rsp->{data}->[1]= " cleanup_ops_bm_node -v"; + $rsp->{data}->[2]= " cleanup_ops_bm_node [--ip ]"; + $cb->($rsp); +} + +1; diff --git a/xCAT-OpenStack-baremetal/lib/python/xcat/__init__.py b/xCAT-OpenStack-baremetal/lib/python/xcat/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/__init__.py b/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/__init__.py b/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/__init__.py new file mode 100644 index 000000000..b0997e201 --- /dev/null +++ b/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2012 NTT DOCOMO, INC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from xcat.openstack.baremetal import driver + +BareMetalDriver = driver.xCATBareMetalDriver diff --git a/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/driver.py b/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/driver.py new file mode 100644 index 000000000..a0972497d --- /dev/null +++ b/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/driver.py @@ -0,0 +1,255 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# coding=utf-8 + +""" +A driver for Bare-metal platform. +""" + +from oslo.config import cfg + +from nova.compute import power_state +from nova import context as nova_context +from nova import exception +from nova.openstack.common import excutils +from nova.openstack.common.gettextutils import _ +from nova.openstack.common import importutils +from nova.openstack.common import jsonutils +from nova.openstack.common import log as logging +from nova.virt.baremetal import baremetal_states +from nova.virt.baremetal import db +from nova.virt.baremetal import driver as bm_driver +from nova.virt.baremetal import utils as bm_utils +from nova.virt import driver +from nova.virt import firewall +from nova.virt.libvirt import imagecache +from xcat.openstack.baremetal import xcat_driver +from xcat.openstack.baremetal import exception as xcat_exception +from xcat.openstack.baremetal import power_states + + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF +CONF.import_opt('use_ipv6', 'nova.netconf') + + +class xCATBareMetalDriver(bm_driver.BareMetalDriver): + """BareMetal hypervisor driver.""" + + def __init__(self, virtapi, read_only=False): + super(xCATBareMetalDriver, self).__init__(virtapi) + self.xcat = xcat_driver.xCAT() + + def _get_xCAT_image_name(self, image_meta): + prop = image_meta.get('properties') + xcat_image_name = prop.get('xcat_image_name') + if xcat_image_name: + return xcat_image_name + else: + raise xcat_exception.xCATInvalidImageError(image=image_meta.get('name')) + + def spawn(self, context, instance, 
image_meta, injected_files, + admin_password, network_info=None, block_device_info=None): + """ + Create a new instance/VM/domain on the virtualization platform. + + Once this successfully completes, the instance should be + running (power_state.RUNNING). + + If this fails, any partial instance should be completely + cleaned up, and the virtualization platform should be in the state + that it was before this call began. + + :param context: security context + :param instance: Instance object as returned by DB layer. + This function should use the data there to guide + the creation of the new instance. + :param image_meta: image object returned by nova.image.glance that + defines the image from which to boot this instance + :param injected_files: User files to inject into instance. + :param admin_password: Administrator password to set in instance. + :param network_info: + :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` + :param block_device_info: Information about block devices to be + attached to the instance. 
+ """ + import pdb + pdb.set_trace() + node_uuid = self._require_node(instance) + node = db.bm_node_associate_and_update(context, node_uuid, + {'instance_uuid': instance['uuid'], + 'instance_name': instance['hostname'], + 'task_state': baremetal_states.BUILDING}) + + try: + self._plug_vifs(instance, network_info, context=context) + self._attach_block_devices(instance, block_device_info) + self._start_firewall(instance, network_info) + + macs = self.macs_for_instance(instance) + nodename = self.xcat.get_xcat_node_name(macs) + imagename = self._get_xCAT_image_name(image_meta) + hostname = instance.get('hostname') + + #get the network information for the new node + interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6) + if CONF.use_ipv6: + fixed_ip = interfaces[0].get('address_v6') + netmask = interfaces[0].get('netmask_v6') + gateway = interfaces[0].get('gateway_v6') + else: + fixed_ip = interfaces[0].get('address') + netmask = interfaces[0].get('netmask') + gateway = interfaces[0].get('gateway') + #convert netmask from IPAddress to unicode string + if netmask: + netmask = unicode(netmask) + + #let xCAT install it + bm_driver._update_state(context, node, instance, baremetal_states.DEPLOYING) + self.xcat.deploy_node(nodename, imagename, hostname, fixed_ip, netmask, gateway) + bm_driver._update_state(context, node, instance, baremetal_states.ACTIVE) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_("Error occured while deploying instance %(instance)s " + "on baremetal node %(node)s: %(error)s") % + {'instance': instance['uuid'], + 'node': node['uuid'], + 'error':str(e)}) + bm_driver._update_state(context, node, instance, baremetal_states.ERROR) + + def reboot(self, context, instance, network_info, reboot_type, + block_device_info=None, bad_volumes_callback=None): + """Reboot the specified instance. + + After this is called successfully, the instance's state + goes back to power_state.RUNNING. 
The virtualization + platform should ensure that the reboot action has completed + successfully even in cases in which the underlying domain/vm + is paused or halted/stopped. + + :param instance: Instance object as returned by DB layer. + :param network_info: + :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` + :param reboot_type: Either a HARD or SOFT reboot + :param block_device_info: Info pertaining to attached volumes + :param bad_volumes_callback: Function to handle any bad volumes + encountered + """ + try: + node = bm_driver._get_baremetal_node_by_instance_uuid(instance['uuid']) + macs = self.macs_for_instance(instance) + nodename = self.xcat.get_xcat_node_name(macs) + self.xcat.reboot_node(nodename) + bm_driver._update_state(context, node, instance, baremetal_states.RUNNING) + except xcat_exception.xCATCommandError as e: + with excutils.save_and_reraise_exception(): + LOG.error(_("Error occured while rebooting instance %(instance)s " + "on baremetal node %(node)s: %(error)s") % + {'instance': instance['uuid'], + 'node': node['uuid'], + 'error':str(e)}) + bm_driver._update_state(context, node, instance, baremetal_states.ERROR) + + def destroy(self, context, instance, network_info, block_device_info=None, + destroy_disks=True): + """Destroy (shutdown and delete) the specified instance. + + If the instance is not found (for example if networking failed), this + function should still succeed. It's probably a good idea to log a + warning in that case. + + :param context: security context + :param instance: Instance object as returned by DB layer. + :param network_info: + :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` + :param block_device_info: Information about block devices that should + be detached from the instance. 
+ :param destroy_disks: Indicates if disks should be destroyed + """ + #import pdb + #pdb.set_trace() + try: + node = bm_driver._get_baremetal_node_by_instance_uuid(instance['uuid']) + + except exception.InstanceNotFound: + LOG.warning(_("Destroy function called on a non-existing instance %s") + % instance['uuid']) + return + + try: + macs = self.macs_for_instance(instance) + nodename = self.xcat.get_xcat_node_name(macs) + interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6) + fixed_ip=None + if interfaces and interfaces[0]: + if CONF.use_ipv6: + fixed_ip = interfaces[0].get('address_v6') + else: + fixed_ip = interfaces[0].get('address') + if fixed_ip: + self.xcat.cleanup_node(nodename, fixed_ip) + else: + self.xcat.cleanup_node(nodename) + except Exception as e: + #just log it and move on + LOG.warning(_("Destroy called with xCAT error:" + str(e))) + + try: + self._detach_block_devices(instance, block_device_info) + self._stop_firewall(instance, network_info) + self._unplug_vifs(instance, network_info) + + bm_driver._update_state(context, node, None, baremetal_states.DELETED) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_("Error occurred while destroying instance %s: %s") + % (instance['uuid'], str(e))) + bm_driver._update_state(context, node, instance, + baremetal_states.ERROR) + + def power_off(self, instance, node=None): + """Power off the specified instance.""" + macs = self.macs_for_instance(instance) + nodename = self.xcat.get_xcat_node_name(macs) + self.xcat.power_off_node(nodename) + + + def power_on(self, context, instance, network_info, block_device_info=None, + node=None): + """Power on the specified instance.""" + macs = self.macs_for_instance(instance) + nodename = self.xcat.get_xcat_node_name(macs) + self.xcat.power_on_node(nodename) + + + def get_console_output(self, instance): + pass + + def get_info(self, instance): + """Get the current status of an instance, by name (not ID!) 
+ + Returns a dict containing: + :state: the running state, one of the power_state codes + :max_mem: (int) the maximum memory in KBytes allowed + :mem: (int) the memory in KBytes used by the domain + :num_cpu: (int) the number of virtual CPUs for the domain + :cpu_time: (int) the CPU time used in nanoseconds + """ + + node = bm_driver._get_baremetal_node_by_instance_uuid(instance['uuid']) + macs = self.macs_for_instance(instance) + nodename = self.xcat.get_xcat_node_name(macs) + + ps = self.xcat.get_node_power_state(nodename) + if ps == power_states.ON: + pstate = power_state.RUNNING + elif ps == power_states.OFF: + pstate = power_state.SHUTDOWN + else: + pstate = power_state.NOSTATE + + return {'state': pstate, + 'max_mem': node['memory_mb'], + 'mem': node['memory_mb'], + 'num_cpu': node['cpus'], + 'cpu_time': 0} diff --git a/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/exception.py b/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/exception.py new file mode 100644 index 000000000..38ce5bf60 --- /dev/null +++ b/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/exception.py @@ -0,0 +1,41 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +"""xCAT baremtal exceptions. 
+""" + +import functools +import sys + +from oslo.config import cfg +import webob.exc + +from nova.openstack.common import excutils +from nova.openstack.common.gettextutils import _ +from nova.openstack.common import log as logging +from nova import safe_utils +from nova import exception as nova_exception + +LOG = logging.getLogger(__name__) + +class xCATException(Exception): + errmsg = _("xCAT general exception") + + def __init__(self, errmsg=None, **kwargs): + if not errmsg: + errmsg = self.errmsg + errmsg = errmsg % kwargs + + super(xCATException, self).__init__(errmsg) + +class xCATCommandError(xCATException): + errmsg = _("Error returned when calling xCAT command %(cmd)s" + " for node %(node)s:%(error)s") + +class xCATInvalidImageError(xCATException): + errmsg = _("The image %(image)s is not an xCAT image") + +class xCATDeploymentFailure(xCATException): + errmsg = _("xCAT node deployment failed for node %(node)s:%(error)s") + +class xCATRebootFailure(xCATException): + errmsg = _("xCAT node rebooting failed for node %(node)s:%(error)s") diff --git a/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/power_states.py b/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/power_states.py new file mode 100644 index 000000000..47e153137 --- /dev/null +++ b/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/power_states.py @@ -0,0 +1,9 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +""" +Possible xCAT node power states. +""" + +OFF = 'off' +ON = 'on' +ERROR = 'error' diff --git a/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/xcat_driver.py b/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/xcat_driver.py new file mode 100644 index 000000000..b8b3ac56a --- /dev/null +++ b/xCAT-OpenStack-baremetal/lib/python/xcat/openstack/baremetal/xcat_driver.py @@ -0,0 +1,257 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# coding=utf-8 + + +""" +Baremetal xCAT power manager. 
+""" + +import os +import sys +import stat +from oslo.config import cfg +import datetime + +from nova import context as nova_context +from nova.virt.baremetal import baremetal_states +from nova.openstack.common.gettextutils import _ +from nova.openstack.common import log as logging +from nova.openstack.common import loopingcall +from nova.openstack.common import timeutils +from nova import paths +from nova import utils +from xcat.openstack.baremetal import exception +from xcat.openstack.baremetal import power_states + +LOG = logging.getLogger(__name__) + +# register configuration options +xcat_opts = [ + cfg.IntOpt('deploy_timeout', + help='Timeout for node deployment. Default: 0 second (unlimited)', + default=0), + cfg.IntOpt('reboot_timeout', + help='Timeout for rebooting a node. Default: 0 second (unlimited)', + default=0), + cfg.IntOpt('deploy_checking_interval', + help='Checking interval for node deployment. Default: 10 seconds', + default=10), + cfg.IntOpt('reboot_checking_interval', + help='Checking interval for rebooting a node. Default: 5 seconds', + default=5), + ] +xcat_group = cfg.OptGroup(name='xcat', + title='xCAT Options') +CONF = cfg.CONF +CONF.register_group(xcat_group) +CONF.register_opts(xcat_opts, xcat_group) + + +class xCAT(object): + """A driver that calls xCAT funcions""" + + def __init__(self): + #setup the path for xCAT commands + #xcatroot = os.getenv('XCATROOT', '/opt/xcat/') + #sys.path.append("%s/bin" % xcatroot) + #sys.path.append("%s/sbin" % xcatroot) + pass + + def _exec_xcat_command(self, command): + """Calls xCAT command.""" + args = command.split(" ") + out, err = utils.execute(*args, run_as_root=True) + LOG.debug(_("xCAT command stdout: '%(out)s', stderr: '%(err)s'"), + {'out': out, 'err': err}) + return out, err + + def get_xcat_node_name(self, macs): + """Get the xcat node name given mac addressed. + + It uses the mac address to search for the node name in xCAT. 
+ """ + for mac in macs: + out, err = self._exec_xcat_command("lsdef -w mac=%s" % mac) + if out: + return out.split(" ")[0] + + errstr='No node found in xCAT with the following mac address: ' \ + + ','.join(macs) + LOG.warning(errstr) + raise exception.xCATCommandError(errstr) + + + def deploy_node(self, nodename, imagename, hostname, fixed_ip, netmask, gateway): + """ + Install the node. + + It calls xCAT command deploy_ops_bmnode which prepares the node + by adding the config_ops_bm_node postbootscript to the postscript + table for the node, then call nodeset and then boot the node up. + """ + out, err = self._exec_xcat_command( + "deploy_ops_bm_node %(node)s --image %(image)s" + " --host %(host)s --ip %(ip)s --mask %(mask)s" + % {'node': nodename, + 'image': imagename, + 'host': hostname, + 'ip': fixed_ip, + 'mask': netmask, + }) + if err: + errstr = _("Error returned when calling xCAT deploy_ops_bm_node" + " command for node %s:%s") % (nodename, err) + LOG.warning(errstr) + raise exception.xCATCommandError(errstr) + self._wait_for_node_deploy(nodename) + + def cleanup_node(self, nodename, fixed_ip=None): + """ + Undo all the changes made to the node by deploy_node function. + + It calls xCAT command cleanup_ops_bm_node which removes the + config_ops_bm_node postbootscript from the postscript table + for the node, removes the alias ip and then power the node off. 
+ """ + cmd = "cleanup_ops_bm_node %s" % nodename + if fixed_ip: + cmd += " --ip %s" % fixed_ip + out, err = self._exec_xcat_command(cmd) + + if err: + errstr = _("Error returned when calling xCAT cleanup_ops_bm_node" + " command for node %s:%s") % (nodename, err) + LOG.warning(errstr) + raise exception.xCATCommandError(errstr) + + def power_on_node(self, nodename): + """Power on the node.""" + state = self.get_node_power_state(nodename) + if state == power_states.ON: + LOG.warning(_("Powring on node called, but the node %s " + "is already on") % nodename) + out, err = self._exec_xcat_command("rpower %s on" % nodename) + if err: + errstr = _("Error returned when calling xCAT rpower on" + " for node %s:%s") % (nodename, err) + LOG.warning(errstr) + raise exception.xCATCommandError(errstr) + else: + return power_states.ON + + def power_off_node(self, nodename): + """Power off the node.""" + state = self.get_node_power_state(nodename) + if state == power_states.OFF: + LOG.warning(_("Powring off node called, but the node %s " + "is already off") % nodename) + out, err = self._exec_xcat_command("rpower %s off" % nodename) + if err: + errstr = _("Error returned when calling xCAT rpower off" + " for node %s:%s") % (nodename, err) + LOG.warning(errstr) + raise exception.xCATCommandError(errstr) + else: + return power_states.OFF + + def reboot_node(self, nodename): + """Reboot the node.""" + out, err = self._exec_xcat_command("rpower %s boot" % nodename) + if err: + errstr = _("Error returned when calling xCAT rpower boot" + " for node %s:%s") % (nodename, err) + LOG.warning(errstr) + raise exception.xCATCommandError(errstr) + + self._wait_for_node_reboot(nodename) + return power_states.ON + + + def get_node_power_state(self, nodename): + out, err = self._exec_xcat_command("rpower %s stat" % nodename) + if err: + errstr = _("Error returned when calling xCAT rpower stat" + " for node %s:%s") % (nodename, err) + LOG.warning(errstr) + raise exception.xCATCommandError(errstr) + 
else: + state = out.split(":")[1] + if state: + state = state.strip() + if state == 'on': + return power_states.ON + elif state == 'off': + return power_states.OFF + + return power_states.ERROR + + def _wait_for_node_deploy(self, nodename): + """Wait for xCAT node deployment to complete.""" + locals = {'errstr':''} + + def _wait_for_deploy(): + out,err = self._exec_xcat_command("nodels %s nodelist.status" % nodename) + if err: + locals['errstr'] = _("Error returned when quering node status" + " for node %s:%s") % (nodename, err) + LOG.warning(locals['errstr']) + raise loopingcall.LoopingCallDone() + + if out: + node,status = out.split(": ") + if status == "booted": + LOG.info(_("Deployment for node %s completed.") + % nodename) + raise loopingcall.LoopingCallDone() + + if (CONF.xcat.deploy_timeout and + timeutils.utcnow() > expiration): + locals['errstr'] = _("Timeout while waiting for" + " deployment of node %s.") % nodename + LOG.warning(locals['errstr']) + raise loopingcall.LoopingCallDone() + + expiration = timeutils.utcnow() + datetime.timedelta( + seconds=CONF.xcat.deploy_timeout) + timer = loopingcall.FixedIntervalLoopingCall(_wait_for_deploy) + # default check every 10 seconds + timer.start(interval=CONF.xcat.deploy_checking_interval).wait() + + if locals['errstr']: + raise exception.xCATDeploymentFailure(locals['errstr']) + + + def _wait_for_node_reboot(self, nodename): + """Wait for xCAT node boot to complete.""" + locals = {'errstr':''} + + def _wait_for_reboot(): + out,err = self._exec_xcat_command("nodels %s nodelist.status" % nodename) + if err: + locals['errstr'] = _("Error returned when quering node status" + " for node %s:%s") % (nodename, err) + LOG.warning(locals['errstr']) + raise loopingcall.LoopingCallDone() + + if out: + node,status = out.split(": ") + if status == "booted": + LOG.info(_("Rebooting node %s completed.") + % nodename) + raise loopingcall.LoopingCallDone() + + if (CONF.xcat.reboot_timeout and + timeutils.utcnow() > expiration): 
+ locals['errstr'] = _("Timeout while waiting for" + " rebooting node %s.") % nodename + LOG.warning(locals['errstr']) + raise loopingcall.LoopingCallDone() + + expiration = timeutils.utcnow() + datetime.timedelta( + seconds=CONF.xcat.reboot_timeout) + timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot) + # default check every 5 seconds + timer.start(interval=CONF.xcat.reboot_checking_interval).wait() + + if locals['errstr']: + raise exception.xCATRebootFailure(locals['errstr']) diff --git a/xCAT-OpenStack-baremetal/pods/man1/opsaddbmnode.1.pod b/xCAT-OpenStack-baremetal/pods/man1/opsaddbmnode.1.pod new file mode 100644 index 000000000..9fc8ec90e --- /dev/null +++ b/xCAT-OpenStack-baremetal/pods/man1/opsaddbmnode.1.pod @@ -0,0 +1,75 @@ +=head1 NAME + +B - It adds xCAT baremetal nodes to an OpenStack cloud. + +=head1 SYNOPSIS + +B I B<-s> I + +B [B<-h>|B<--help>] + +B [B<-v>|B<--version>] + +=head1 DESCRIPTION + +The B command registers xCAT nodes to an OpenStack cloud. + +An OpenStack nova baremetal node registration command takes several node attributes: +=item BMC ip addresss, user id and password +=item Name of nova compute host which will control this baremetal node +=item Number of CPUs in the node +=item Memory in the node (MB) +=item Local hard disk in the node (GB) +=item MAC address to provision the node + +The opsaddbmnode command pulls the above baremetal node information from xCAT tables and calls "nova baremetal-node-create" to register the baremetal node with the OpenStack cloud. + +Please make sure the following xCAT tables are filled with correct information for the given nodes before calling this command. +=item ipmi (for BMC ip addresss, user id and password) +=item mac (for MAC address) +=item hwinv (for CPU, memory and disk info.) + +=head1 Parameters + +I is a comma separated node or node group names. + + +=head1 OPTIONS + +=over 10 + +=item B<-s> The node name of the OpenStack compute host that hosts the baremetal nodes. 
+ +=item B<-h|--help> Display usage message. + +=item B<-v|--version> The Command Version. + +=back + +=head1 RETURN VALUE + +0 The command completed successfully. + +1 An error has occurred. + +=head1 EXAMPLES + +=over 3 + +=item 1. + +To register node1, node2 and node3 to OpenStack, sv1 is the compute host. + + opsassbmnode node1,node2,node3 -s sv1 + + +=back + +=head1 FILES + +/opt/xcat/bin/opadddbmnode + +=head1 SEE ALSO + +L + diff --git a/xCAT-OpenStack-baremetal/pods/man1/opsaddimage.1.pod b/xCAT-OpenStack-baremetal/pods/man1/opsaddimage.1.pod new file mode 100644 index 000000000..d44dcec3b --- /dev/null +++ b/xCAT-OpenStack-baremetal/pods/man1/opsaddimage.1.pod @@ -0,0 +1,65 @@ +=head1 NAME + +B - It adds or removes nodes for the vlan. + +=head1 SYNOPSIS + +B I B<-n> I [B<-c> I] + +B [B<-h>|B<--help>] + +B [B<-v>|B<--version>] + +=head1 DESCRIPTION + +The B command adds a list of xCAT images into the OpenStack cloud. + +Under the cover, it creates a fake imgae and registers the fake image into OpenStack with command B. It sets the property in the image to indicate that this is an xCAT image and also stores the original xCAT image name in the property for further reference. + +The xCAT image names can be listed using B command. + +=head1 Parameters + +I a comma separated xCAT images names. + + +=head1 OPTIONS + +=over 10 + +=item B<-n> a comma separated new image names in the OpenStack. If omitted, the default is the original xCAT image nanme. + +=item B<-c> the node name of the OpenStack controller. This node must be an xCAT managed node. + +=item B<-h|--help> Display usage message. + +=item B<-v|--version> The Command Version. + +=back + +=head1 RETURN VALUE + +0 The command completed successfully. + +1 An error has occurred. + +=head1 EXAMPLES + +=over 3 + +=item 1. + +To register xCAT image rhels6.3-x86_64-install-compute into OpenStack. 
+ + opsaddimage rhels6.3-x86_64-install-compute -c sv2 + +=back + +=head1 FILES + +/opt/xcat/bin/opsaddimage + +=head1 SEE ALSO + +L + diff --git a/xCAT-OpenStack-baremetal/share/xcat/openstack/postscripts/config_ops_bm_node b/xCAT-OpenStack-baremetal/share/xcat/openstack/postscripts/config_ops_bm_node new file mode 100755 index 000000000..b9be1257d --- /dev/null +++ b/xCAT-OpenStack-baremetal/share/xcat/openstack/postscripts/config_ops_bm_node @@ -0,0 +1,187 @@ +#!/bin/sh +# IBM(c) 2014 EPL license http://www.eclipse.org/legal/epl-v10.html + +# xCAT post script for configuring the openstack baremetal node. +# The format is: +# config_ops_bm_node ops_hostname ops_ip ops_netmask + + +get_os_type() +{ + #get os type + str_os_type=`uname | tr 'A-Z' 'a-z'` + str_temp='' + if [ "$str_os_type" = "linux" ];then + str_temp=`echo $OSVER | grep -E '(sles|suse)'` + if [ -f "/etc/debian_version" ];then + str_os_type="debian" + elif [ -f "/etc/SuSE-release" -o -n "$str_temp" ];then + str_os_type="sles" + else + str_os_type="redhat" + fi + else + str_os_type="aix" + fi + echo "$str_os_type" +} + + +setup_ip() +{ + str_os_type=$1 + str_if_name=$2 + str_v4ip=$3 + str_v4mask=$4 + + ret=`ifconfig $str_if_name |grep "inet addr" 2>&1` + if [ $? -eq 0 ]; then + old_ip=`echo $ret|cut -d':' -f2 |cut -d' ' -f1` + old_mask=`echo $ret|cut -d':' -f4` + #echo "old ip = $old_ip, old mask=$old_mask" + if [ "$old_ip" == "$str_v4ip" -a "$old_mask" == "$str_v4mask" ]; then + #if nic is up and the address is the same, then donothing + #echo "do nothing" + exit 0 + else + #bring down the nic and reconstruct it. 
+ #echo "bring down the old nic" + ifconfig $str_if_name del $old_ip + fi + fi + + if [ "$str_os_type" = "sles" ];then + str_conf_file="/etc/sysconfig/network/ifcfg-${str_if_name}" + if [ -f $str_conf_file ]; then + rm $str_conf_file + fi + echo "DEVICE=${str_if_name}" > $str_conf_file + echo "BOOTPROTO=static" >> $str_conf_file + echo "IPADDR=${str_v4ip}" >> $str_conf_file + echo "NETMASK=${str_v4mask}" >> $str_conf_file + echo "NETWORK=''" >> $str_conf_file + echo "STARTMODE=onboot" >> $str_conf_file + echo "USERCONTROL=no" >> $str_conf_file + ifup $str_if_name + #debian ubuntu + elif [ "$str_os_type" = "debian" ];then + str_conf_file="/etc/network/interfaces.d/${str_if_name}" + if [ -f $str_conf_file ]; then + rm $str_conf_file + fi + echo "auto ${str_if_name}" > $str_conf_file + echo "iface ${str_if_name} inet static" >> $str_conf_file + echo " address ${str_v4ip}" >> $str_conf_file + echo " netmask ${str_v4mask}" >> $str_conf_file + ifconfig $str_if_name up + else + # Write the info to the ifcfg file for redhat + str_conf_file="/etc/sysconfig/network-scripts/ifcfg-${str_if_name}" + if [ -f $str_conf_file ]; then + rm $str_conf_file + fi + echo "DEVICE=${str_if_name}" > $str_conf_file + echo "BOOTPROTO=static" >> $str_conf_file + echo "NM_CONTROLLED=no" >> $str_conf_file + echo "IPADDR=${str_v4ip}" >> $str_conf_file + echo "NETMASK=${str_v4mask}" >> $str_conf_file + echo "ONBOOT=yes" >> $str_conf_file + ifup $str_if_name + fi +} + + +str_os_type=$(get_os_type) +echo "os_type=$str_os_type" +if [ "$str_os_type" = "aix" ]; then + logger -t xcat "config_ops_bm_node dose not support AIX." + echo "config_ops_bm_node dose not support AIX." + exit 0 +fi + +#change the hostname +if [[ -n "$1" ]]; then + hostname $1 +fi + +#Add the openstack ip to the node +if [[ -n $2 ]]; then + ops_ip=$2 + + if [[ -z $3 ]]; then + logger -t xcat "config_ops_bm_node: Please specify the netmask." + echo "config_ops_bm_node: Please specify the netmask." 
+ exit 1 + else + ops_mask=$3 + fi + + #figure out the install nic + if [[ -n $MACADDRESS ]]; then + pos=0 + #mac has the following format: 01:02:03:04:05:0E!node5|01:02:03:05:0F!node6-eth1 + for x in `echo "$MACADDRESS" | tr "|" "\n"` + do + node="" + mac="" + pos=$((pos+1)) + i=`expr index $x !` + if [[ $i -gt 0 ]]; then + node=`echo ${x##*!}` + mac_tmp=`echo ${x%%!*}` + else + mac_tmp=$x + fi + + if [[ $pos -eq 1 ]]; then + mac1=$mac_tmp + fi + + if [[ "$PRIMARYNIC" = "$mac_tmp" ]]; then + mac=$mac_tmp + break + fi + + if [[ -z "$PRIMARYNIC" ]] || [[ "$PRIMARYNIC" = "mac" ]]; then + if [[ -z $node ]] || [[ "$node" = "$NODE" ]]; then + mac=$mac_tmp + break + fi + fi + done + + if [[ -z $mac ]]; then + if [[ -z "$PRIMARYNIC" ]] || [[ "$PRIMARYNIC" = "mac" ]]; then + mac=$mac1 #if nothing mathes, take the first mac + else + nic=$PRIMARYNIC #or the primary nic itself is the nic + fi + fi + else + logger -t xcat "config_ops_bm_node: no mac addresses are defined in the mac table for the node $NODE" + echo "config_ops_bm_node: no mac addresses are defined in the mac table for the node $NODE" + index=$((index+1)) + continue + fi + echo "mac=$mac" + + #find the nic that has the mac + if [[ -z $nic ]]; then + #go to each nic to match the mac address + ret=`ifconfig |grep -i $mac 2>&1`; + if [ $? -eq 0 ]; then + nic=`echo $ret |head -n1|cut -d' ' -f 1` + else + logger -t xcat "config_ops_bm_node: The mac address for the network for $NODE is not defined." + echo "config_ops_bm_node: The mac address for the network for $NODE is not defined." 
+ fi + fi + echo "nic=$nic" + + #now setup the ip alias + setup_ip $str_os_type $nic:0 $ops_ip $ops_mask +fi + + + + diff --git a/xCAT-OpenStack-baremetal/share/xcat/openstack/postscripts/deconfig_ops_bm_node b/xCAT-OpenStack-baremetal/share/xcat/openstack/postscripts/deconfig_ops_bm_node new file mode 100755 index 000000000..7a69fed1a --- /dev/null +++ b/xCAT-OpenStack-baremetal/share/xcat/openstack/postscripts/deconfig_ops_bm_node @@ -0,0 +1,65 @@ +#!/bin/sh +# IBM(c) 2014 EPL license http://www.eclipse.org/legal/epl-v10.html + +# xCAT post script for deconfiguring the openstack baremetal node. +# The format is: +# deconfig_ops_bm_node ops_ip + + +get_os_type() +{ + #get os type + str_os_type=`uname | tr 'A-Z' 'a-z'` + str_temp='' + if [ "$str_os_type" = "linux" ];then + str_temp=`echo $OSVER | grep -E '(sles|suse)'` + if [ -f "/etc/debian_version" ];then + str_os_type="debian" + elif [ -f "/etc/SuSE-release" -o -n "$str_temp" ];then + str_os_type="sles" + else + str_os_type="redhat" + fi + else + str_os_type="aix" + fi + echo "$str_os_type" +} + + +str_os_type=$(get_os_type) +echo "os_type=$str_os_type" + +if [ $str_os_type == "aix" ]; then + logger -t xcat "deconfig_ops_bm_node dose not support AIX." + echo "deconfig_ops_bm_node dose not support AIX." 
+ exit 0 +fi + +#change the hostname +hostname $NODE + +#remove the openstack ip from the node +if [[ -n $1 ]]; then + ops_ip=$1 + nic=$(ip addr | grep $ops_ip | awk '{print $NF}') + echo "nic=$nic, ops_ip=$ops_ip" + + ifconfig $nic del $ops_ip + + #delete the configuration file + if [ "$str_os_type" = "sles" ]; then + str_conf_file="/etc/sysconfig/network/ifcfg-$nic" + elif [ "$str_os_type" = "debian" ]; then #debian ubuntu + str_conf_file="/etc/network/interfaces.d/$nic" + else #redhat + str_conf_file="/etc/sysconfig/network-scripts/ifcfg-$nic" + fi + if [ -f $str_conf_file ]; then + rm $str_conf_file + fi +fi + + + + diff --git a/xCAT-OpenStack-baremetal/xCAT-OpenStack-baremetal.spec b/xCAT-OpenStack-baremetal/xCAT-OpenStack-baremetal.spec new file mode 100644 index 000000000..afda4c26f --- /dev/null +++ b/xCAT-OpenStack-baremetal/xCAT-OpenStack-baremetal.spec @@ -0,0 +1,103 @@ +Summary: Executables and data of the xCAT baremetal driver for OpenStack +Name: xCAT-OpenStack-baremetal +Version: %(cat Version) +Release: snap%(date +"%Y%m%d%H%M") +Epoch: 4 +License: IBM +Group: Applications/System +Source: xCAT-OpenStack-baremetal-%{version}.tar.gz +Packager: IBM Corp. +Vendor: IBM Corp. +Distribution: %{?_distribution:%{_distribution}}%{!?_distribution:%{_vendor}} +Prefix: /opt/xcat +BuildRoot: /var/tmp/%{name}-%{version}-%{release}-root + +%ifos linux +BuildArch: noarch +%endif + + +Provides: xCAT-OpenStack-baremetal = %{epoch}:%{version} + +Requires: xCAT-client + +%description +xCAT-OpenStack-baremetal provides the baremetal driver for OpenStack. + +%prep +%setup -q -n xCAT-OpenStack-baremetal +%build + +# Convert pods to man pages and html pages +./xpod2man + +%install +# The install phase puts all of the files in the paths they should be in when the rpm is +# installed on a system. 
The RPM_BUILD_ROOT is a simulated root file system and usually +# has a value like: /var/tmp/xCAT-OpenStack-baremetal-2.0-snap200802270932-root +rm -rf $RPM_BUILD_ROOT + +mkdir -p $RPM_BUILD_ROOT/%{prefix}/bin +mkdir -p $RPM_BUILD_ROOT/%{prefix}/sbin +mkdir -p $RPM_BUILD_ROOT/%{prefix}/lib/perl/xCAT_plugin +mkdir -p $RPM_BUILD_ROOT/%{prefix}/lib/python/xcat/openstack/baremetal +mkdir -p $RPM_BUILD_ROOT/%{prefix}/share/xcat/openstack/postscripts +mkdir -p $RPM_BUILD_ROOT/%{prefix}/share/man/man1 +mkdir -p $RPM_BUILD_ROOT/%{prefix}/share/doc/man1 + + +set +x + +cp -R lib/* $RPM_BUILD_ROOT/%{prefix}/lib +cp share/xcat/openstack/postscripts/* $RPM_BUILD_ROOT/%{prefix}/share/xcat/openstack/postscripts + + +# These were built dynamically in the build phase +cp share/man/man1/* $RPM_BUILD_ROOT/%{prefix}/share/man/man1 +chmod 444 $RPM_BUILD_ROOT/%{prefix}/share/man/man1/* + +# These were built dynamically during the build phase +cp share/doc/man1/* $RPM_BUILD_ROOT/%{prefix}/share/doc/man1 +chmod 644 $RPM_BUILD_ROOT/%{prefix}/share/doc/man1/* + +# These links get made in the RPM_BUILD_ROOT/prefix area +ln -sf ../bin/xcatclient $RPM_BUILD_ROOT/%{prefix}/sbin/deploy_ops_bm_node +ln -sf ../bin/xcatclient $RPM_BUILD_ROOT/%{prefix}/sbin/cleanup_ops_bm_node +ln -sf ../bin/xcatclient $RPM_BUILD_ROOT/%{prefix}/bin/opsaddbmnode +ln -sf ../bin/xcatclientnnr $RPM_BUILD_ROOT/%{prefix}/bin/opsaddimage + +set -x + + +%clean +# This step does not happen until *after* the %files packaging below +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root) +#%doc LICENSE.html +# Just package everything that has been copied into RPM_BUILD_ROOT +%{prefix} + + +%changelog + +%post +#copy the postscripts under /installl/postscripts directory on MN only +if [ -f "/etc/xCATMN" ]; then + cp $RPM_INSTALL_PREFIX0/share/xcat/openstack/postscripts/* /install/postscripts +fi +exit 0 + +%preun +#remove postscripts under /installl/postscripts directory on MN only +if [ -f "/etc/xCATMN" ]; then + for fn in 
$RPM_INSTALL_PREFIX0/share/xcat/openstack/postscripts/* + do + bn=`basename $fn` + rm /install/postscripts/$bn + done +fi +exit 0 + + diff --git a/xCAT-OpenStack-baremetal/xpod2man b/xCAT-OpenStack-baremetal/xpod2man new file mode 100755 index 000000000..79c4df9c2 --- /dev/null +++ b/xCAT-OpenStack-baremetal/xpod2man @@ -0,0 +1,213 @@ +#!/usr/bin/perl +# IBM(c) 2007 EPL license http://www.eclipse.org/legal/epl-v10.html + +# First builds the xCAT summary man page from Synopsis of each man page. +# Then converts all of the pod man pages into html (including links to each other) + +# We assume that this script is run in the xCAT-vlan-2.0 dir, so everything is +# done relative to that. + +use strict; +#use lib '.'; +use Pod::Man; +use Pod::Html; + +my $poddir = 'pods'; +my $mandir = 'share/man'; +my $htmldir = 'share/doc'; +my $cachedir = '/tmp'; + +my @pods = getPodList($poddir); +#foreach (@pods) { print "$_\n"; } exit; + +# Build the cmd overview page. +writesummarypage("$poddir/man1/xcat.1.pod", @pods); + +# Build the man page for each pod. +#mkdir($mandir) or die "Error: could not create $mandir.\n"; +print "Converting PODs to man pages...\n"; +foreach my $podfile (@pods) { + my $manfile = $podfile; + $manfile =~ s/^$poddir/$mandir/; # change the beginning of the path + $manfile =~ s/\.pod$//; # change the ending + my $mdir = $manfile; + $mdir =~ s|/[^/]*$||; # get rid of the basename part + if (system("mkdir -p $mdir")) { die "Error: could not create $mdir.\n"; } + my ($section) = $podfile =~ /\.(\d+)\.pod$/; + convertpod2man($podfile, $manfile, $section); +} + +my @dummyPods = createDummyPods($poddir, \@pods); + +# Build the html page for each pod. 
+#mkdir($htmldir) or die "Error: could not create $htmldir.\n"; +print "Converting PODs to HTML pages...\n"; +# have to clear the cache, because old entries can cause a problem +unlink("$cachedir/pod2htmd.tmp", "$cachedir/pod2htmi.tmp"); +foreach my $podfile (@pods) { + my $htmlfile = $podfile; + $htmlfile =~ s/^$poddir/$htmldir/; # change the beginning of the path + $htmlfile =~ s/\.pod$/\.html/; # change the ending + my $hdir = $htmlfile; + $hdir =~ s|/[^/]*$||; # get rid of the basename part + if (system("mkdir -p $hdir")) { die "Error: could not create $hdir.\n"; } + #print "$podfile, $htmlfile, $poddir, $htmldir\n"; + convertpod2html($podfile, $htmlfile, $poddir, $htmldir); +} + +# Remove the dummy pods +unlink @dummyPods; +rmdir "$poddir/man7"; + +exit; + + +# To enable linking between the cmd man pages and the db man pages, need to: +# grep thru the cmd pods searching for references (L<>) to any section 5 man page +# if that pod does not exist, create an empty one that will satisfy pod2html +# keep track of all dummy pods created, so they can be removed later +sub createDummyPods { + my ($poddir, $pods) = @_; + my $cmd = "grep -r -E 'L<.+\\([57]\\)\\|.+\\.[57]>' " . $poddir; + #print "Running cmd: ", $cmd, "\n"; + my @lines = `$cmd`; + if ($?) 
{ print "Error running: $cmd\n"; print join('', @lines); } + #my @lines; + #system($cmd); + my @dummyPods; + foreach my $l (@lines) { + #print "$l\n"; + my @matches = $l =~ /L<([^\(]+)\(([57])\)\|\1\.[57]>/g; # get all the matches in the line + # The above line should create the array with every other entry being the man page name + # and every other entry is the section # (5 or 7) + my $cmd; + while ($cmd=shift @matches) { + #foreach my $m (@matches) { + my $section = shift @matches; + my $filename = "$poddir/man$section/$cmd.$section.pod"; + #print "$filename\n"; + if (!(grep /^$filename$/, @$pods) && !(grep /^$filename$/, @dummyPods)) { push @dummyPods, $filename; } + } + } + + + # Create these empty files + print "Creating empty linked-to files: ", join(', ', @dummyPods), "\n"; + mkdir "$poddir/man7"; + foreach my $d (@dummyPods) { + if (!open(TMP, ">>$d")) { warn "Could not create dummy pod file $d ($!)\n"; } + else { close TMP; } + } + + return @dummyPods; +} + +# Recursively get the list of pod man page files. +sub getPodList { + my $poddir = shift; + my @files; + + # 1st get toplevel dir listing + opendir(DIR, $poddir) or die "Error: could not read $poddir.\n"; + my @topdir = grep !/^\./, readdir(DIR); # / + close(DIR); + + # Now go thru each subdir (these are man1, man3, etc.) + foreach my $mandir (@topdir) { + opendir(DIR, "$poddir/$mandir") or die "Error: could not read $poddir/$mandir.\n"; + my @dir = grep !/^\./, readdir(DIR); # / + close(DIR); + foreach my $file (@dir) { + push @files, "$poddir/$mandir/$file"; + } + } + return sort @files; +} + + +# Create the xcat man page that gives a summary description of each xcat cmd. +sub writesummarypage { + my $file = shift; # relative path file name of the man page + # the rest of @_ contains the pod files that describe each cmd + + open(FILE, ">$file") or die "Error: could not open $file for writing.\n"; + + print FILE <<'EOS1'; +=head1 NAME + +B - extreme Cluster Administration Tool. 
+ +=head1 DESCRIPTION + +Extreme Cluster Administration Toolkit (xCAT). xCAT is a scalable distributed computing management +and provisioning tool that provides a unified interface for hardware control, discovery, and +OS diskful/diskfree deployment. + + +=head1 XCAT DATABASE + +All of the cluster configuration information is in the xCAT database. See L for +descriptions of every table in the database. + +=head1 XCAT COMMANDS + +What follows is a short description of each xCAT command. To get more information about a particular +command, see its man page. Note that the commands are listed in alphabetical order B, +i.e. all the commands in section 1, then the commands in section 3, etc. + +=over 12 +EOS1 + +# extract the summary for each cmd from its man page +foreach my $manpage (@_) { + my ($sectionnum) = $manpage =~ /\.(\d+)\.pod$/; + # Suck in the whole file, then we will parse it. + open(MANPAGE, "$manpage") or die "Error: could not open $manpage for reading.\n"; + my @contents = ; + my $wholemanpage = join('', @contents); + close(MANPAGE); + # This regex matches: optional space, =head1, space, title, space, cmd, space, description, newline + my ($cmd, $description) = $wholemanpage =~ /^\s*=head1\s+\S+\s+(\S+)\s+(.+?)\n/si; + if (!defined($cmd)) { print "Warning: $manpage is not in a recognized structure. It will be ignored.\n"; next; } + if (!defined($description)) { print "Warning: $manpage does not have a description for $cmd. It will be ignored.\n"; next; } + $cmd =~ s/^.<(.+)>$/$1/; # if the cmd name has pod formatting around it, strip it off + $description =~ s/^-\s*//; # if the description has a leading hypen, strip it off + print FILE "\n=item L<$cmd($sectionnum)|$cmd.$sectionnum>\n\n".$description."\n"; +} + +# Artificially add the xcattest cmd, because the xCAT-test rpm will add this +print FILE "\n=item L\n\nRun automated xCAT test cases.\n"; + + print FILE <<"EOS3"; + +=back +EOS3 + + close FILE; +} + + +# Create the html page for one pod. 
+sub convertpod2html { + my ($podfile, $htmlfile, $poddir, $htmldir) = @_; + + #TODO: use --css= and --title= to make the pages look better + pod2html($podfile, + "--outfile=$htmlfile", + "--podpath=man1", + "--podroot=$poddir", + "--htmldir=$htmldir", + "--recurse", + "--cachedir=$cachedir", + ); + +} + + +# Create the man page for one pod. +sub convertpod2man { + my ($podfile, $manfile, $section) = @_; + + my $parser = Pod::Man->new(section => $section); + $parser->parse_from_file($podfile, $manfile); +} From 4c031e18061b70b9ba75f8e03fb41cb8d75ddc22 Mon Sep 17 00:00:00 2001 From: lissav Date: Thu, 20 Feb 2014 07:25:29 -0500 Subject: [PATCH 13/17] fix for defect 3985 --- xCAT-server/lib/xcat/plugins/anaconda.pm | 3 ++- xCAT-server/lib/xcat/plugins/debian.pm | 2 +- xCAT-server/lib/xcat/plugins/pxe.pm | 2 +- xCAT-server/lib/xcat/plugins/toolscenter.pm | 3 ++- xCAT-server/lib/xcat/plugins/vsmppxe.pm | 2 +- xCAT-server/lib/xcat/plugins/xnba.pm | 8 ++++---- xCAT-server/lib/xcat/plugins/yaboot.pm | 3 ++- 7 files changed, 13 insertions(+), 10 deletions(-) diff --git a/xCAT-server/lib/xcat/plugins/anaconda.pm b/xCAT-server/lib/xcat/plugins/anaconda.pm index 41ddb29a3..1ed0c468e 100644 --- a/xCAT-server/lib/xcat/plugins/anaconda.pm +++ b/xCAT-server/lib/xcat/plugins/anaconda.pm @@ -34,6 +34,7 @@ my $httpport="80"; my $useflowcontrol="0"; + sub handled_commands { return { @@ -62,7 +63,7 @@ sub preprocess_request my @ents = xCAT::TableUtils->get_site_attribute("sharedtftp"); my $site_ent = $ents[0]; unless ( defined($site_ent) - and ($site_ent =~ /no/i or $site_ent =~ /0/)) + and ($site_ent eq "no" or $site_ent eq "NO" or $site_ent eq "0")) { #unless requesting no sharedtftp, don't make hierarchical call diff --git a/xCAT-server/lib/xcat/plugins/debian.pm b/xCAT-server/lib/xcat/plugins/debian.pm index 6908d4188..96225a518 100644 --- a/xCAT-server/lib/xcat/plugins/debian.pm +++ b/xCAT-server/lib/xcat/plugins/debian.pm @@ -77,7 +77,7 @@ sub preprocess_request ($sent) = 
$stab->getAttribs({key => 'sharedtftp'}, 'value'); unless ( $sent and defined($sent->{value}) - and ($sent->{value} =~ /no/i or $sent->{value} =~ /0/)) + and ($sent->{value} eq "no" or $sent->{value} eq "NO" or $sent->{value} eq "0")) { #unless requesting no sharedtftp, don't make hierarchical call diff --git a/xCAT-server/lib/xcat/plugins/pxe.pm b/xCAT-server/lib/xcat/plugins/pxe.pm index 0e69e63d7..31c7fcab7 100644 --- a/xCAT-server/lib/xcat/plugins/pxe.pm +++ b/xCAT-server/lib/xcat/plugins/pxe.pm @@ -328,7 +328,7 @@ sub preprocess_request { #my $sent = $stab->getAttribs({key=>'sharedtftp'},'value'); my @entries = xCAT::TableUtils->get_site_attribute("sharedtftp"); my $t_entry = $entries[0]; - if ( defined($t_entry) and ($t_entry == 0 or $t_entry =~ /no/i)) { + if ( defined($t_entry) and ($t_entry eq "0" or $t_entry eq "no" or $t_entry eq "NO")) { # check for computenodes and servicenodes from the noderange, if so error out my @SN; my @CN; diff --git a/xCAT-server/lib/xcat/plugins/toolscenter.pm b/xCAT-server/lib/xcat/plugins/toolscenter.pm index a8ef36118..70c500605 100644 --- a/xCAT-server/lib/xcat/plugins/toolscenter.pm +++ b/xCAT-server/lib/xcat/plugins/toolscenter.pm @@ -68,9 +68,10 @@ sub preprocess_request my @entries = xCAT::TableUtils->get_site_attribute("sharedtftp"); my $t_entry = $entries[0]; unless ( defined($t_entry) - and ($t_entry =~ /no/i or $t_entry =~ /0/)) + and ($t_entry eq "no" or $t_entry eq "NO" or $t_entry eq "0")) { + #unless requesting no sharedtftp, don't make hierarchical call return [$req]; } diff --git a/xCAT-server/lib/xcat/plugins/vsmppxe.pm b/xCAT-server/lib/xcat/plugins/vsmppxe.pm index 89e7b1485..46e097522 100644 --- a/xCAT-server/lib/xcat/plugins/vsmppxe.pm +++ b/xCAT-server/lib/xcat/plugins/vsmppxe.pm @@ -260,7 +260,7 @@ sub preprocess_request { my @entries = xCAT::TableUtils->get_site_attribute("sharedtftp"); my $t_entry = $entries[0]; - if ( defined($t_entry) and ($t_entry == 0 or $t_entry =~ /no/i)) { + if ( 
defined($t_entry) and ($t_entry eq "0" or $t_entry eq "no" or $t_entry eq "NO")) { # check for computenodes and servicenodes from the noderange, if so error out my @SN; my @CN; diff --git a/xCAT-server/lib/xcat/plugins/xnba.pm b/xCAT-server/lib/xcat/plugins/xnba.pm index 8ce1c116a..114283c92 100644 --- a/xCAT-server/lib/xcat/plugins/xnba.pm +++ b/xCAT-server/lib/xcat/plugins/xnba.pm @@ -348,27 +348,27 @@ sub preprocess_request { #they specify no sharedtftp in site table my @entries = xCAT::TableUtils->get_site_attribute("sharedtftp"); my $t_entry = $entries[0]; - if ( defined($t_entry) and ($t_entry == 0 or $t_entry =~ /no/i)) { + if ( defined($t_entry) and ($t_entry eq "0" or $t_entry eq "no" or $t_entry eq "NO")) { # check for computenodes and servicenodes from the noderange, if so error out my @SN; my @CN; xCAT::ServiceNodeUtils->getSNandCPnodes(\@$nodes, \@SN, \@CN); unless (($args[0] eq 'stat') or ($args[0] eq 'enact')) { # mix is ok for these options - if ((@SN > 0) && (@CN >0 )) { # there are both SN and CN + if ((@SN > 0) && (@CN >0 )) { # there are both SN and CN my $rsp; $rsp->{data}->[0] = "Nodeset was run with a noderange containing both service nodes and compute nodes. This is not valid. You must submit with either compute nodes in the noderange or service nodes. 
\n"; xCAT::MsgUtils->message("E", $rsp, $callback1); return; - } + } } $req->{'_disparatetftp'}=[1]; if ($req->{inittime}->[0]) { return [$req]; } - if (@CN >0 ) { # there are computenodes then run on all servicenodes + if (@CN >0 ) { # if compute nodes broadcast to all servicenodes return xCAT::Scope->get_broadcast_scope($req,@_); } } diff --git a/xCAT-server/lib/xcat/plugins/yaboot.pm b/xCAT-server/lib/xcat/plugins/yaboot.pm index b90c6df25..73cbd6d5e 100644 --- a/xCAT-server/lib/xcat/plugins/yaboot.pm +++ b/xCAT-server/lib/xcat/plugins/yaboot.pm @@ -379,7 +379,8 @@ sub preprocess_request { #if they specify no sharedtftp in site table my @entries = xCAT::TableUtils->get_site_attribute("sharedtftp"); my $t_entry = $entries[0]; - if ( defined($t_entry) and ($t_entry == 0 or $t_entry =~ /no/i)) { + + if ( defined($t_entry) and ($t_entry eq "0" or $t_entry eq "no" or $t_entry eq "NO")) { # check for computenodes and servicenodes from the noderange, if so error out my @SN; my @CN; From e331cf508592a605c989746648faf53d023bc1ee Mon Sep 17 00:00:00 2001 From: lissav Date: Thu, 20 Feb 2014 11:33:54 -0500 Subject: [PATCH 14/17] more code --- xCAT-server/lib/xcat/plugins/zone.pm | 34 +++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/xCAT-server/lib/xcat/plugins/zone.pm b/xCAT-server/lib/xcat/plugins/zone.pm index 91398d3a8..85c026448 100644 --- a/xCAT-server/lib/xcat/plugins/zone.pm +++ b/xCAT-server/lib/xcat/plugins/zone.pm @@ -141,6 +141,32 @@ sub process_request } else { $request->{zonename} = $ARGV[0]; } + # if -s entered must be yes/1 or no/0 + if ($options{'sshbetweennodes'}) { + if ($options{'sshbetweennodes'}=~ /^yes$/i || $options{'sshbetweennodes'} eq "1") { + $options{'sshbetweennodes'}= "yes"; + } else { + if ($options{'sshbetweennodes'}=~ /^no$/i || $options{'sshbetweennodes'} eq "0") { + $options{'sshbetweennodes'}= "no"; + } else { + my $rsp = {}; + $rsp->{error}->[0] = + "The input on the -s flag 
$options{'sshbetweennodes'} is not valid."; + xCAT::MsgUtils->message("E", $rsp, $callback); + exit 1; + } + } + } + + # check for site.sshbetweennodes attribute, put out a warning it will not be used as long + # as zones are defined in the zone table. + my @entries = xCAT::TableUtils->get_site_attribute("sshbetweennodes"); + if ($entries[0]) { + my $rsp = {}; + $rsp->{info}->[0] = + "The site table sshbetweennodes attribute is set to $entries[0]. It is not used when zones are defined. To get rid of this warning, remove the site table sshbetweennodes attribute."; + xCAT::MsgUtils->message("I", $rsp, $callback); + } # save input noderange if ($options{'noderange'}) { @@ -439,7 +465,13 @@ sub updatezonetable # now add the users zone my %tb_cols; - $tb_cols{sshkeydir} = $keydir; + $tb_cols{sshkeydir} = $keydir; # key directory + # set sshbetweennodes attribute from -s flag or default to yes + if ( $$options{'sshbetweennodes'}) { + $tb_cols{sshbetweennodes} = $$options{'sshbetweennodes'}; + } else { + $tb_cols{sshbetweennodes} = "yes"; + } my $zonename=$request->{zonename}; if ( $$options{'defaultzone'}) { # set the default # check to see if a default already defined From 2e0ccb7d7e7bb62529b9a4b3c6c574470d199c29 Mon Sep 17 00:00:00 2001 From: lissav Date: Thu, 20 Feb 2014 11:35:20 -0500 Subject: [PATCH 15/17] more code --- perl-xCAT/xCAT/Zone.pm | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/perl-xCAT/xCAT/Zone.pm b/perl-xCAT/xCAT/Zone.pm index e974a366d..b9f93c72a 100644 --- a/perl-xCAT/xCAT/Zone.pm +++ b/perl-xCAT/xCAT/Zone.pm @@ -199,7 +199,8 @@ sub getdefaultzone foreach my $zone (@zones) { # Look for the defaultzone=yes/1 entry if ((defined($zone->{defaultzone})) && - (($zone->{defaultzone} =~ "yes") || ($zone->{defaultzone} eq "1"))) { + (($zone->{defaultzone} =~ /^yes$/i ) + || ($zone->{defaultzone} eq "1"))) { $defaultzone = $zone->{zonename}; } $tab->close(); @@ -279,8 +280,9 @@ sub getzoneinfo $zonehash->{$zonename}->{sshkeydir}= 
$zone->{sshkeydir}; $zonehash->{$zonename}->{defaultzone}= $zone->{defaultzone}; # find the defaultzone - if ((defined($zone->{defaultzone})) && - (($zone->{defaultzone} =~ "yes") || ($zone->{defaultzone} eq "1"))) { + if ((defined($zone->{defaultzone})) && + (($zone->{defaultzone} =~ /^yes$/i ) + || ($zone->{defaultzone} eq "1"))) { $defaultzone = $zone->{zonename}; } } From 56e8e8b0d5d7b0674c466ebafc1e9623a86a19cd Mon Sep 17 00:00:00 2001 From: lissav Date: Thu, 20 Feb 2014 12:34:51 -0500 Subject: [PATCH 16/17] zone code --- perl-xCAT/xCAT/TableUtils.pm | 166 ++++++++++++++++++++++------------- 1 file changed, 105 insertions(+), 61 deletions(-) diff --git a/perl-xCAT/xCAT/TableUtils.pm b/perl-xCAT/xCAT/TableUtils.pm index b44cb6f27..faec9af36 100644 --- a/perl-xCAT/xCAT/TableUtils.pm +++ b/perl-xCAT/xCAT/TableUtils.pm @@ -422,78 +422,38 @@ rmdir \"/tmp/$to_userid\" \n"; xCAT::TableUtils->bldnonrootSSHFiles($from_userid); } + # send the keys # For root user and not to devices only to nodes if (($from_userid eq "root") && (!($ENV{'DEVICETYPE'}))) { # Need to check if nodes are in a zone. - # If in a zone, then root ssh keys for the node will be taken from the zones ssh keys not ~/.ssh - # zones are only supported on nodes that are not a service node. 
- # Also for the call to RemoteShellExp, we must group the nodes that are in the same zone - + my @zones; my $tab = xCAT::Table->new("zone"); if ($tab) { # if we have zones, need to send the zone keys to each node in the zone my @zones = $tab->getAllAttribs('zonename'); $tab->close(); - if (@zones) { # we have zones defined - my $rc = xCAT::TableUtils->sendkeystozones($ref_nodes,$expecttimeout); - if ($rc != 0) - { - $rsp->{data}->[0] = "Error sending ssh keys to the zones.\n"; - xCAT::MsgUtils->message("E", $rsp, $::CALLBACK); - return 1; - - } - return 0; - } } else { $rsp->{data}->[0] = "Could not open zone table.\n"; xCAT::MsgUtils->message("E", $rsp, $::CALLBACK); return 1; } - - - # if no zone table defined, do it the old way - # send the keys to the nodes for root or some other id - # - # The nodes must be checked against the site.sshbetweennodes attribute - # This site attribute determines whether to setup - # node to node ssh - my $enablenodes; - my $disablenodes; - my @nodelist= split(",", $n_str); - foreach my $n (@nodelist) - { - my $enablessh=xCAT::TableUtils->enablessh($n); - if ($enablessh == 1) { - $enablenodes .= $n; - $enablenodes .= ","; - } else { - $disablenodes .= $n; - $disablenodes .= ","; + if (@zones) { # we have zones defined + my $rc = xCAT::TableUtils->sendkeysTOzones($ref_nodes,$expecttimeout); + if ($rc != 0) + { + $rsp->{data}->[0] = "Error sending ssh keys to the zones.\n"; + xCAT::MsgUtils->message("E", $rsp, $::CALLBACK); + } - - } - if ($enablenodes) { # node on list to setup nodetonodessh - chop $enablenodes; # remove last comma - $ENV{'DSH_ENABLE_SSH'} = "YES"; - # send the keys to the nodes - my $rc=xCAT::RemoteShellExp->remoteshellexp("s",$::CALLBACK,"/usr/bin/ssh",$enablenodes,$expecttimeout); + } else { # no zones + + # if no zone table defined, do it the old way , keys are in ~/.ssh + my $rc = xCAT::TableUtils->sendkeysNOzones($ref_nodes,$expecttimeout); if ($rc != 0) - { - $rsp->{data}->[0] = "remoteshellexp failed 
sending keys to enablenodes."; - xCAT::MsgUtils->message("E", $rsp, $::CALLBACK); - - } - } - if ($disablenodes) { # node on list to disable nodetonodessh - chop $disablenodes; # remove last comma - # send the keys to the nodes - my $rc=xCAT::RemoteShellExp->remoteshellexp("s",$::CALLBACK,"/usr/bin/ssh",$disablenodes,$expecttimeout); - if ($rc != 0) - { - $rsp->{data}->[0] = "remoteshellexp failed sending keys to disablenodes."; - xCAT::MsgUtils->message("E", $rsp, $::CALLBACK); + { + $rsp->{data}->[0] = "Error sending ssh keys to the nodes.\n"; + xCAT::MsgUtils->message("E", $rsp, $::CALLBACK); } } @@ -539,10 +499,11 @@ rmdir \"/tmp/$to_userid\" \n"; #-------------------------------------------------------------------------------- -=head3 sendkeystozones +=head3 sendkeysNOzones Transfers the ssh keys - for the root id on the nodes using the zone table. + for the root id on the nodes no zones + key from ~/.ssh site.sshbetweennodes honored Arguments: @@ -561,7 +522,7 @@ rmdir \"/tmp/$to_userid\" \n"; Error: 0=good, 1=error Example: - xCAT::TableUtils->setupSSH(@target_nodes,$expecttimeout); + xCAT::TableUtils->sendkeysNOzones($ref_nodes,$expecttimeout); Comments: Does not setup known_hosts. 
Assumes automatically setup by SSH ( ssh config option StrictHostKeyChecking no should @@ -570,11 +531,94 @@ rmdir \"/tmp/$to_userid\" \n"; =cut #-------------------------------------------------------------------------------- -sub sendkeystozones +sub sendkeysNOzones +{ + my ($class, $ref_nodes,$expecttimeout) = @_; + my @nodes=$ref_nodes; + my $enablenodes; + my $disablenodes; + my $n_str = $nodes[0]; + my @nodelist= split(",", $n_str); + my $rsp = (); + foreach my $n (@nodelist) + { + my $enablessh=xCAT::TableUtils->enablessh($n); + if ($enablessh == 1) { + $enablenodes .= $n; + $enablenodes .= ","; + } else { + $disablenodes .= $n; + $disablenodes .= ","; + } + + } + if ($enablenodes) { # node on list to setup nodetonodessh + chop $enablenodes; # remove last comma + $ENV{'DSH_ENABLE_SSH'} = "YES"; + # send the keys to the nodes + my $rc=xCAT::RemoteShellExp->remoteshellexp("s",$::CALLBACK,"/usr/bin/ssh",$enablenodes,$expecttimeout); + if ($rc != 0) + { + $rsp->{data}->[0] = "remoteshellexp failed sending keys to enablenodes."; + xCAT::MsgUtils->message("E", $rsp, $::CALLBACK); + + } + } + if ($disablenodes) { # node on list to disable nodetonodessh + chop $disablenodes; # remove last comma + # send the keys to the nodes + my $rc=xCAT::RemoteShellExp->remoteshellexp("s",$::CALLBACK,"/usr/bin/ssh",$disablenodes,$expecttimeout); + if ($rc != 0) + { + $rsp->{data}->[0] = "remoteshellexp failed sending keys to disablenodes."; + xCAT::MsgUtils->message("E", $rsp, $::CALLBACK); + + } + } +} +#-------------------------------------------------------------------------------- + +=head3 sendkeysTOzones + + Transfers the ssh keys + for the root id on the nodes using the zone table. + If in a zone, then root ssh keys for the node will be taken from the zones ssh keys not ~/.ssh + zones are only supported on nodes that are not a service node. 
+ Also for the call to RemoteShellExp, we must group the nodes that are in the same zone + + + Arguments: + Array of nodes + Timeout for expect call (optional) + Returns: + + Env Variables: $DSH_FROM_USERID, $DSH_TO_USERID, $DSH_REMOTE_PASSWORD + the ssh keys are transferred from the $DSH_FROM_USERID to the $DSH_TO_USERID + on the node(s). The DSH_REMOTE_PASSWORD and the DSH_FROM_USERID + must be obtained by + the calling script or from the xdsh client + + Globals: + $::XCATROOT , $::CALLBACK + Error: + 0=good, 1=error + Example: + xCAT::TableUtils->sendkeysTOzones($ref_nodes,$expecttimeout); + Comments: + Does not setup known_hosts. Assumes automatically + setup by SSH ( ssh config option StrictHostKeyChecking no should + be set in the ssh config file). + +=cut + +#-------------------------------------------------------------------------------- +sub sendkeysTOzones { my ($class, $ref_nodes,$expecttimeout) = @_; my @nodes=$ref_nodes; - my %zonehash =xCAT::Zone->getNodeZones(@nodes); + my $n_str = $nodes[0]; + my @nodelist= split(",", $n_str); + my %zonehash =xCAT::Zone->getNodeZones(@nodelist); # for each zone in the zonehash # if sshbetweennodes is yes # $ENV{'DSH_ENABLE_SSH'} = "YES"; From 54982068cfa96580d1366ec9233bae9da26de1da Mon Sep 17 00:00:00 2001 From: ligc Date: Fri, 21 Feb 2014 12:39:57 -0600 Subject: [PATCH 17/17] fix for bug 3991: if the node can not be resolved or ip address is not valid, print warning message and ignore the node --- xCAT-server/lib/xcat/plugins/hosts.pm | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/xCAT-server/lib/xcat/plugins/hosts.pm b/xCAT-server/lib/xcat/plugins/hosts.pm index 77217fd52..abd762dc8 100755 --- a/xCAT-server/lib/xcat/plugins/hosts.pm +++ b/xCAT-server/lib/xcat/plugins/hosts.pm @@ -288,9 +288,16 @@ sub add_hosts_content { } else { - my $rsp; - push @{$rsp->{data}}, "Invalid IP Addr \'$ref->{ip}\' for node \'$ref->{node}\'."; - xCAT::MsgUtils->message("E", $rsp, $callback); + my 
$rsp; + if (!$ip) + { + push @{$rsp->{data}}, "Ignoring node \'$nodename\', it can not be resolved."; + } + else + { + push @{$rsp->{data}}, "Ignoring node \'$nodename\', its ip address \'$ip\' is not valid."; + } + xCAT::MsgUtils->message("W", $rsp, $callback); } if (defined($ref->{otherinterfaces}))