Compare commits


6 Commits

Author   SHA1         Message                                                       Date
lissav   9db88139d3   Defect 4061                                                   2014-04-30 09:56:53 +01:00
arif     a9068dbd0f   Revert "temporary fix for postscripts for EXECUTE and EXECUTEALWAYS" (reverts commit 06cad71050)   2014-04-30 09:56:21 +01:00
arif     06cad71050   temporary fix for postscripts for EXECUTE and EXECUTEALWAYS   2014-04-29 07:45:15 +01:00
arif     3e52ee0c40   defect #4061: fix DSHCLI.pm for append in hierarchy           2014-04-24 14:44:30 +01:00
lissav   993e2f77a8   defect 4061                                                   2014-04-24 14:44:23 +01:00
arif     da0431e69a   defect #4061: fix DSHCLI.pm for merge in hierarchy            2014-04-24 14:44:14 +01:00
226 changed files with 2179 additions and 10101 deletions
+1 -1
@@ -1 +1 @@
2.8.4
2.8.3
+241
@@ -0,0 +1,241 @@
#!/bin/sh
# Update GSA Ubuntu Repositories or create a local repository
#
# Author: Leonardo Tonetto (tonetto@linux.vnet.ibm.com)
# Revisor: Arif Ali (aali@ocf.co.uk)
#
# After running this script, add the following line to
# /etc/apt/sources.list for local repository
# deb file://<core_repo_path>/xcat-core/ maverick main
# deb file://<dep_repo_path>/xcat-dep/ maverick main
#
# For the purpose of getting the distribution name
# Supported distributions
dists="squeeze"
a_flag= # automatic flag - only update if repo was updated
c_flag= # xcat-core (trunk-devel) path
d_flag= # xcat-dep (trunk) path
local_flag= # build the repository locally
while getopts 'c:d:u:p:l:a' OPTION
do
case $OPTION in
c) c_flag=1
xcat_core_path="$OPTARG"
;;
d) d_flag=1
xcat_dep_path="$OPTARG"
;;
u) u_flag=1 # GSA user id, used by the lftp upload below
gsa_id="$OPTARG"
;;
p) p_flag=1 # GSA password, used by the lftp upload below
gsa_passwd="$OPTARG"
;;
l) local_flag=1
local_repo_path="$OPTARG"
;;
a) a_flag=1
;;
?) printf "Usage: %s -c <core_trunk_path> [-d <dep_trunk_path>] -l <local-repo_path> [-a]\n" $(basename $0) >&2
echo "-a Automatic: update only if there's any update on repo"
exit 2
;;
esac
done
shift $(($OPTIND - 1))
if [ -z "$c_flag" -a -z "$d_flag" ]
then
printf "Usage: %s -c <core_trunk_path> [-d <dep_trunk_path>] { -l <local-repo_path> | [-u <gsa_id> -p <gsa_passwd>] } [-a]\n" $(basename $0) >&2
echo "-a Automatic: update only if there's any update on repo"
exit 2
fi
if [ ! -d $xcat_core_path ]
then
printf "%s: No such directory\n" "$xcat_core_path" >&2
exit 2
fi
if [ "$d_flag" ]
then
if [ ! -d $xcat_dep_path ]
then
printf "%s: No such directory\n" "$xcat_dep_path" >&2
exit 2
fi
fi
if [ "$local_flag" ]
then
repo_xcat_core_path=$local_repo_path"/xcat-core"
repo_xcat_dep_path=$local_repo_path"/xcat-dep"
else
printf "Usage: %s -c <core_trunk_path> [-d <dep_trunk_path>] -l <local-repo_path> [-a]\n" $(basename $0) >&2
echo "-a Automatic: update only if there's any update on repo"
exit 2
fi
if [ "$a_flag" ]
then
touch svcupdate.trace
SVCUP='svcupdate.trace'
svn update $xcat_core_path 1> $SVCUP 2>&1
if ! grep 'Tree is up to date' $SVCUP
then
update_core=1
else
update_core=
fi
rm -f $SVCUP
else
update_core=1
fi
if [ "$c_flag" -a "$update_core" ]
then
echo "###############################"
echo "# Building xcat-core packages #"
echo "###############################"
CMD_PATH=`pwd`
cd $xcat_core_path
./build-debs-all "snap" "Nightly_Builds"
echo "#################################"
echo "# Creating xcat-core repository #"
echo "#################################"
if [ -d $repo_xcat_core_path ]; then
rm -rf $repo_xcat_core_path
fi
mkdir -p $repo_xcat_core_path/conf
find . -iname '*.deb' -exec mv {} $repo_xcat_core_path \;
rm -rf debs/
cd $CMD_PATH
rm -rf $repo_xcat_core_path/conf/distributions
for dist in $dists; do
cat << __EOF__ >> $repo_xcat_core_path/conf/distributions
Origin: xCAT internal repository
Label: xcat-core bazaar repository
Codename: $dist
Architectures: amd64
Components: main
Description: Repository automatically generated conf
__EOF__
done
cat << __EOF__ > $repo_xcat_core_path/conf/options
verbose
basedir .
__EOF__
for dist in $dists; do
for file in `ls $repo_xcat_core_path/*.deb`; do
reprepro -b $repo_xcat_core_path includedeb $dist $file;
done
done
mv $xcat_core_path/latest_version $repo_xcat_core_path/xcat-core_latest-build
cat << '__EOF__' > $repo_xcat_core_path/mklocalrepo.sh
codename=`lsb_release -a 2>/dev/null | grep Codename | awk '{print $2}'`
cd `dirname $0`
echo deb file://"`pwd`" $codename main > /etc/apt/sources.list.d/xcat-core.list
__EOF__
chmod 775 $repo_xcat_core_path/mklocalrepo.sh
rm -rf $repo_xcat_core_path/*.deb
if [ -z "$local_flag" ]
then
echo "###############################"
echo "# Updating GSA xcat-core repo #"
echo "###############################"
lftp -e "mirror -R --delete-first $repo_xcat_core_path /projects/i/ipl-xcat/ubuntu/; exit;" -u $gsa_id,$gsa_passwd -p 22 sftp://ausgsa.ibm.com
fi ### if [ -z "$local_flag" ]
fi ### if [ "$a_flag" ]
if [ "$a_flag" -a "$d_flag" ]
then
touch svcupdate.trace
SVCUP='svcupdate.trace'
svn update $xcat_dep_path 1> $SVCUP 2>&1
if ! grep 'Tree is up to date' $SVCUP
then
update_dep=1
else
update_dep=
fi
rm -f $SVCUP
else
update_dep=1
fi
if [ "$d_flag" -a "$update_dep" ]
then
echo "##############################"
echo "# Building xcat-dep packages #"
echo "##############################"
CMD_PATH=`pwd`
cd $xcat_dep_path
./build-debs-all "snap" "Nightly_Builds"
echo "################################"
echo "# Creating xcat-dep repository #"
echo "################################"
rm -rf $repo_xcat_dep_path
mkdir -p $repo_xcat_dep_path/conf
find $xcat_dep_path -iname '*.deb' -exec cp {} $repo_xcat_dep_path \;
rm -rf $repo_xcat_dep_path/conf/distributions
for dist in $dists; do
cat << __EOF__ >> $repo_xcat_dep_path/conf/distributions
Origin: xCAT internal repository
Label: xcat-dep bazaar repository
Codename: $dist
Architectures: amd64
Components: main
Description: Repository automatically generated conf
__EOF__
done
cat << __EOF__ > $repo_xcat_dep_path/conf/options
verbose
basedir .
__EOF__
for dist in $dists; do
for file in `ls $repo_xcat_dep_path/*.deb`; do
reprepro -b $repo_xcat_dep_path includedeb $dist $file;
done
done
cat << '__EOF__' > $repo_xcat_dep_path/mklocalrepo.sh
codename=`lsb_release -a 2>/dev/null | grep Codename | awk '{print $2}'`
cd `dirname $0`
echo deb file://"`pwd`" $codename main > /etc/apt/sources.list.d/xcat-dep.list
__EOF__
chmod 775 $repo_xcat_dep_path/mklocalrepo.sh
rm -rf $repo_xcat_dep_path/*.deb
if [ -z "$local_flag" ]
then
echo "##############################"
echo "# Updating GSA xcat-dep repo #"
echo "##############################"
lftp -e "mirror -R --delete-first $repo_xcat_dep_path /projects/i/ipl-xcat/ubuntu/; exit;" -u $gsa_id,$gsa_passwd -p 22 sftp://ausgsa.ibm.com
fi ### if [ -z "$local_flag" ]
fi ### if [ "$d_flag" -a "$update_dep" ]
if [ -z "$local_flag" ] # delete the temp repo after upload is done
then
rm -rf ./gsa-repo_temp
fi
exit 0
+2 -6
@@ -53,7 +53,7 @@ for i in $*; do
done
# Supported distributions
dists="maverick natty oneiric precise saucy"
dists="maverick natty oneiric precise"
c_flag= # xcat-core (trunk-devel) path
d_flag= # xcat-dep (trunk) path
@@ -276,7 +276,7 @@ __EOF__
done
#create the mklocalrepo script
cat << '__EOF__' > mklocalrepo.sh
cat << __EOF__ > mklocalrepo.sh
. /etc/lsb-release
cd `dirname $0`
echo deb file://"`pwd`" $DISTRIB_CODENAME main > /etc/apt/sources.list.d/xcat-core.list
@@ -413,10 +413,6 @@ __EOF__
while [ $((i+=1)) -le 5 ] && ! rsync -urLv --delete xcat-dep ${uploader},xcat@web.sourceforge.net:${sf_dir}/ubuntu/
do : ; done
#upload the tarball
i=0
echo "Uploading $dep_tar_name to ${sf_dir}/xcat-dep/2.x_Ubuntu/ ..."
while [ $((i+=1)) -le 5 ] && ! rsync -v $dep_tar_name ${uploader},xcat@web.sourceforge.net:${sf_dir}/xcat-dep/2.x_Ubuntu/
do : ; done
cd $old_pwd
fi
exit 0
+3
@@ -10,6 +10,9 @@
<packagereq type="required">xCAT-server</packagereq>
<packagereq type="required">xCAT-client</packagereq>
<packagereq type="required">perl-xCAT</packagereq>
<packagereq type="required">xCAT-nbroot-core-x86_64</packagereq>
<packagereq type="required">xCAT-nbroot-core-x86</packagereq>
<packagereq type="optional">xCAT-nbroot-core-ppc64</packagereq>
</packagelist>
</group>
</comps>
-1
@@ -75,7 +75,6 @@ function makexcat {
tar --exclude .svn --exclude upflag -czf $RPMROOT/SOURCES/postscripts.tar.gz postscripts LICENSE.html
tar --exclude .svn -czf $RPMROOT/SOURCES/prescripts.tar.gz prescripts
tar --exclude .svn -czf $RPMROOT/SOURCES/templates.tar.gz templates
tar --exclude .svn -czf $RPMROOT/SOURCES/winpostscripts.tar.gz winpostscripts
cp xcat.conf $RPMROOT/SOURCES
cp xCATMN $RPMROOT/SOURCES
cd - >/dev/null
-1
@@ -223,7 +223,6 @@ if (ref($request) eq 'HASH') { # the request is an array, not pure XML
SSL_key_file => $keyfile,
SSL_cert_file => $certfile,
SSL_ca_file => $cafile,
SSL_verify_mode => SSL_VERIFY_PEER,
SSL_use_cert => 1,
Timeout => 0,
);
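The options in this hunk follow the IO::Socket::SSL parameter style; removing SSL_verify_mode => SSL_VERIFY_PEER leaves peer verification to the module's default behavior. A minimal standalone sketch of a client that keeps explicit verification (the host, port, and certificate paths are hypothetical):

#!/usr/bin/perl
# Hypothetical client keeping explicit peer verification; the endpoint and
# certificate paths are made up for illustration.
use strict;
use warnings;
use IO::Socket::SSL qw(SSL_VERIFY_PEER);

my $sock = IO::Socket::SSL->new(
    PeerAddr        => 'mgmtnode:3001',                  # hypothetical endpoint
    SSL_key_file    => '/etc/xcat/cert/client-key.pem',  # hypothetical paths
    SSL_cert_file   => '/etc/xcat/cert/client-cert.pem',
    SSL_ca_file     => '/etc/xcat/cert/ca.pem',
    SSL_verify_mode => SSL_VERIFY_PEER,  # reject servers the CA did not sign
    SSL_use_cert    => 1,                # present the client certificate too
) or die "SSL connect failed: $IO::Socket::SSL::SSL_ERROR\n";

print $sock "ping\n";
close $sock;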
+1 -1
@@ -618,7 +618,7 @@ sub getDBtable
{
# need to get info from DB
my $thistable = xCAT::Table->new($table, -create => 1);
my $thistable = xCAT::Table->new($table, -create => 1, -autocommit => 0);
if (!$thistable)
{
return undef;
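The change above opens the table with -autocommit => 0, so writes are batched until an explicit commit; the same open/commit/close pattern appears verbatim in the update_windows_prodkey code removed later in this compare. A minimal sketch of that usage, assuming the xCAT::Table API shown in these hunks (table, node, and column names are illustrative):

#!/usr/bin/perl
# Hypothetical sketch of the xCAT::Table open/read/commit pattern with
# autocommit disabled; table, node, and column names are illustrative.
use strict;
use warnings;
use xCAT::Table;

my $tab = xCAT::Table->new('nodelist', -create => 1, -autocommit => 0);
die "cannot open nodelist table\n" unless $tab;

my $ent = $tab->getNodeAttribs('node1', ['status']);
my $status = (defined $ent && defined $ent->{status}) ? $ent->{status} : 'unknown';
print "node1 status: $status\n";

$tab->commit;    # flush any pending writes as one transaction
$tab->close;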
+28 -17
@@ -4972,7 +4972,8 @@ sub parse_rsync_input_file_on_MN
$::process_line = 0;
my $destfileisdir;
my $clause=0;
my $addmergescript =0;
my $addappendscript =0;
open(INPUTFILE, "< $input_file") || die "File $input_file does not exist\n";
while (my $line = <INPUTFILE>)
{
@@ -5013,12 +5014,16 @@ sub parse_rsync_input_file_on_MN
# this triggers the running of the appendscript
$::appendscript ="/opt/xcat/share/xcat/scripts/xdcpappend.sh";
}
# add the append script to the sync
my $appscript ="/opt/xcat/share/xcat/scripts/xdcpappend.sh";
my $appendscriptline = "$appscript -> $appscript";
$syncappendscript=1; # syncing the xdcpappend.sh script
&build_append_rsync($appendscriptline,$nodes, $options, $input_file,$rsyncSN, $syncdir,$nodesyncfiledir,$onServiceNode,$syncappendscript);
}
if ($addappendscript == 0) { # only add once
my $appscript ="/opt/xcat/share/xcat/scripts/xdcpappend.sh";
my $appendscriptline = "$appscript -> $appscript";
$syncappendscript=1; # syncing the xdcpappend.sh script
&build_append_rsync($appendscriptline,$nodes, $options, $input_file,$rsyncSN, $syncdir,$nodesyncfiledir,$onServiceNode,$syncappendscript);
$addappendscript=1;
}
} # end APPEND clause
if ($clause =~ /MERGE:/) {
# location of the base merge script
# for MERGE we have to sync the mergescript and the
@@ -5030,12 +5035,16 @@ sub parse_rsync_input_file_on_MN
# this triggers the running of the mergescript
$::mergescript ="/opt/xcat/share/xcat/scripts/xdcpmerge.sh";
}
# add the merge script to the sync
my $mergescript ="/opt/xcat/share/xcat/scripts/xdcpmerge.sh";
my $mergescriptline = "$mergescript -> $mergescript";
$syncmergescript=1; # syncing the xdcpmerge.sh script
&build_merge_rsync($mergescriptline,$nodes, $options, $input_file,$rsyncSN, $syncdir,$nodesyncfiledir,$onServiceNode,$syncmergescript);
}
if ($addmergescript == 0) { # only add once
my $mergescript ="/opt/xcat/share/xcat/scripts/xdcpmerge.sh";
my $mergescriptline = "$mergescript -> $mergescript";
$syncmergescript=1; # syncing the xdcpmerge.sh script
&build_merge_rsync($mergescriptline,$nodes, $options, $input_file,$rsyncSN, $syncdir,$nodesyncfiledir,$onServiceNode,$syncmergescript);
$addmergescript=1;
}
} # end MERGE clause
}
} else { # not processing EXECUTE, EXECUTEALWAYS or APPEND
@@ -5199,6 +5208,7 @@ sub build_append_rsync
push @::appendlines,$line;
}
my $src_file = $1; # append file left of arrow
my $orig_src_file = $1; # append file left of arrow
# it will be sync'd to $nodesyncfiledir/$append_file
my $dest_file = $nodesyncfiledir;
$dest_file .= $src_file;
@@ -5227,7 +5237,7 @@ sub build_append_rsync
# to pick up files from /var/xcat/syncfiles...
if ($onServiceNode == 1) {
my $newsrcfile = $syncdir; # add SN syncdir on front
$newsrcfile .= $src_file;
$newsrcfile .= $orig_src_file;
$src_file=$newsrcfile;
}
# destination file name
@@ -5311,6 +5321,7 @@ sub build_merge_rsync
push @::mergelines,$line;
}
my $src_file = $1; # merge file left of arrow
my $orig_src_file = $1;
# it will be sync'd to $nodesyncfiledir/$merge_file
my $dest_file = $nodesyncfiledir;
$dest_file .= $src_file;
@@ -5339,7 +5350,7 @@ sub build_merge_rsync
# to pick up files from /var/xcat/syncfiles...
if ($onServiceNode == 1) {
my $newsrcfile = $syncdir; # add SN syncdir on front
$newsrcfile .= $src_file;
$newsrcfile .= $orig_src_file;
$src_file=$newsrcfile;
}
# destination file name
@@ -6029,11 +6040,11 @@ sub run_always_rsync_postscripts
# if on the service node need to add the $syncdir directory
# to the path
if (xCAT::Utils->isServiceNode()) {
my $tmpp=$syncdir . $ps;
$ps=$tmpp;
my $tmps=$syncdir . $ps;
push @args, $tmps;
} else{
push @args, $ps;
}
push @args, $ps;
push (@nodes, @{$$dshparms{'postscripts'}{$ps}});
$out=xCAT::Utils->runxcmd( { command => ['xdsh'],
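The fix in this file guards the APPEND and MERGE clause handling with the $addappendscript/$addmergescript flags, so xdcpappend.sh and xdcpmerge.sh are pushed onto the rsync list once per synclist rather than once per clause line. A minimal sketch of that guard pattern, with the synclist parsing simplified and the file name hypothetical:

#!/usr/bin/perl
# Hypothetical sketch: push the append helper script onto the sync list only
# once, no matter how many "src -> dest" lines the APPEND clause contains.
use strict;
use warnings;

my $input_file = 'sync.list';            # hypothetical synclist file
open my $fh, '<', $input_file or die "cannot open $input_file: $!\n";
my $addappendscript = 0;
my @synclist;
while (my $line = <$fh>) {
    chomp $line;
    next unless $line =~ /->/;           # only consider sync pairs
    push @synclist, $line;
    if ($addappendscript == 0) {         # first APPEND line seen
        my $script = '/opt/xcat/share/xcat/scripts/xdcpappend.sh';
        push @synclist, "$script -> $script";
        $addappendscript = 1;            # never add the helper again
    }
}
close $fh;
print "$_\n" for @synclist;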
+16 -185
@@ -15,7 +15,6 @@ use xCAT::PPCcli qw(SUCCESS EXPECT_ERROR RC_ERROR NR_ERROR);
use xCAT::Usage;
use xCAT::NodeRange;
use xCAT::FSPUtils;
use xCAT::VMCommon;
#use Data::Dumper;
use xCAT::MsgUtils qw(verbose_message);
##############################################
@@ -53,7 +52,7 @@ sub chvm_parse_extra_options {
my $args = shift;
my $opt = shift;
# Partition used attributes #
my @support_ops = qw(vmcpus vmmemory vmphyslots vmothersetting vmstorage vmnics del_vadapter);
my @support_ops = qw(vmcpus vmmemory vmphyslots vmothersetting);
if (ref($args) ne 'ARRAY') {
return "$args";
}
@@ -85,24 +84,6 @@ sub chvm_parse_extra_options {
$opt->{bsr} = $1;
}
next;
} elsif ($cmd eq "vmstorage") {
if (exists($opt->{vios})) {
if ($value !~ /\d+/) {
return "'$value' is invalid, must be numbers";
} else {
my @array = ();
for (1..$value) {
push @array, 0;
}
$value = \@array;
}
} else {
if ($value =~ /^([\w_-]*):(\d+)$/) {
$value = ["0,$1:$2"];
} else {
return "'$value' is invalid, must be in form of 'Server_name:slotnum'";
}
}
}
} else {
@@ -143,7 +124,7 @@ sub chvm_parse_args {
$Getopt::Long::ignorecase = 0;
Getopt::Long::Configure( "bundling" );
if ( !GetOptions( \%opt, qw(V|verbose p=s i=s m=s r=s p775 vios) )) {
if ( !GetOptions( \%opt, qw(V|verbose p=s i=s m=s r=s p775) )) {
return( usage() );
}
####################################
@@ -417,7 +398,7 @@ sub mkvm_parse_args {
push @unsupport_ops, $tmpop;
}
}
my @support_ops = qw(vmcpus vmmemory vmphyslots vmothersetting vmnics vmstorage);
my @support_ops = qw(vmcpus vmmemory vmphyslots vmothersetting);
if (defined(@ARGV[0]) and defined($opt{full})) {
return(usage("Option 'full' shall be used alone."));
} elsif (defined(@ARGV[0])) {
@@ -730,45 +711,6 @@ sub do_op_extra_cmds {
$action = "part_set_lpar_pending_proc";
} elsif ($op eq "vmphyslots") {
$action = "set_io_slot_owner_uber";
} elsif ($op eq "del_vadapter") {
$action = "part_clear_vslot_config";
} elsif ($op eq "vmnics") {
my @vlans = split /,/,$param;
foreach (@vlans) {
if (/vlan(\d+)/i) {
my $vlanid = $1;
my $mac = lc(xCAT::VMCommon::genMac($name));
if ($mac =~ /(..):(..):(..):(..):(..):(..)/) {
my $tail = hex($6)+$vlanid;
$mac = sprintf("$1$2$3$4$5%02x",$tail);
}
my $value = xCAT::FSPUtils::fsp_api_action($request,$name, $d, "part_set_veth_slot_config",0,"0,$vlanid,$mac");
if (@$value[1] && ((@$value[1] =~ /Error/i) && (@$value[2] ne '0'))) {
return ([[$name, @$value[1], '1']]) ;
} else {
push @values, [$name, "Success", '0'];
}
}
}
next;
} elsif ($op eq "vmstorage") {
foreach my $v_info (@$param) {
if ($v_info =~ /(\d+),([\w_-]*):(\d+)/) {
my $vios = &find_lpar_id($request, @$d[3], $2);
my $r_slotid = $3;
if (!defined($vios)) {
return ([[$name, "Cannot find lparid for Server lpar:$1", '1']]);
}
$v_info = "$1,$vios,$r_slotid";
}
my $value = xCAT::FSPUtils::fsp_api_action($request,$name, $d, "part_set_vscsi_slot_config",0,$v_info);
if (@$value[1] && ((@$value[1] =~ /Error/i) && (@$value[2] ne '0'))) {
return ([[$name, @$value[1], '1']]) ;
} else {
push @values, [$name, "Success", '0'];
}
}
next;
} elsif ($op eq "vmmemory") {
my @td = @$d;
@td[0] = 0;
@@ -808,9 +750,6 @@ sub do_op_extra_cmds {
$action = "part_set_lpar_pending_mem";
} elsif ($op eq "bsr") {
$action = "set_lpar_bsr";
} elsif ($op eq "vios") {
print __LINE__."=========>op=vios===\n";
next;
} else {
last;
}
@@ -1699,34 +1638,6 @@ sub query_cec_info_actions {
#$data .= "\n";
next;
}
if ($action eq "part_get_all_vio_info") {
my @output = split /\n/, @$values[1];
my ($drc_index,$drc_name);
foreach my $line (@output) {
chomp($line);
if ($line =~ /Index:.*drc_index:([^,]*),\s*drc_name:(.*)$/) {
$drc_index = $1;
$drc_name = $2;
next;
} elsif ($line =~ /\s*lpar_id=(\d+),type=(vSCSI|vSerial),slot=(\d+),attr=(\d+).*remote_lpar_id=(0x\w+),remote_slot_num=(0x\w+)/) {
if ($4 eq '0') {
push @array, [$name, "$1,$3,$drc_name,$drc_index,$2 Client(Server_lparid=$5,Server_slotid=$6)", 0];
} else {
push @array, [$name, "$1,$3,$drc_name,$drc_index,$2 Server", 0];
}
} elsif ($line =~ /\s*lpar_id=(\d+),type=(vEth),slot=(\d+).*port_vlan_id=(\d+),mac_addr=(\w+)/) {
push @array, [$name, "$1,$3,$drc_name,$drc_index,$2 (port_vlanid=$4,mac_addr=$5)", 0];
#} elsif ($line =~ /\s*lpar_id=(\d+),type=(\w+),slot=(\d+)/) {
# push @array, [$name, "$1,$3,$drc_name,$drc_index,$2", 0];
#} else {
#print "=====>line:$line\n";
#push @array, [$name, $line, 0];
}
$drc_index = '';
$drc_name = '';
}
next;
}
}
#$data .= "@$values[1]\n\n";
push @array, [$name, @$values[1], @$values[2]];
@@ -1749,16 +1660,14 @@ sub query_cec_info {
my $args = $request->{opt};
my @td = ();
my @result = ();
#print Dumper($request);
#print Dumper($hash);
while (my ($mtms,$h) = each(%$hash) ) {
while (my ($name, $d) = each (%$h)) {
@td = @$d;
if (@$d[0] == 0 && @$d[4] !~ /lpar|vios/) {
if (@$d[0] == 0 && @$d[4] ne "lpar") {
last;
}
#my $rethash = query_cec_info_actions($request, $name, $d, 0, ["part_get_lpar_processing","part_get_lpar_memory","part_get_all_vio_info","lpar_lhea_mac","part_get_all_io_bus_info","get_huge_page","get_cec_bsr"]);
my $rethash = query_cec_info_actions($request, $name, $d, 0, ["part_get_lpar_processing","part_get_lpar_memory","part_get_all_io_bus_info","part_get_all_vio_info","get_huge_page","get_cec_bsr"]);
my $rethash = query_cec_info_actions($request, $name, $d, 0, ["part_get_lpar_processing","part_get_lpar_memory","part_get_all_io_bus_info","get_huge_page","get_cec_bsr"]);
#push @result, [$name, $rethash, 0];
push @result, @$rethash;
}
@@ -1857,7 +1766,7 @@ sub deal_with_avail_mem {
} else {
$cur_avail = $lparhash->{hyp_avail_mem} + $used_regions - $tmphash{lpar0_used_mem};
}
#xCAT::MsgUtils->verbose_message($request, "====****====used:$used_regions,avail:$cur_avail,($min:$cur:$max).");
xCAT::MsgUtils->verbose_message($request, "====****====used:$used_regions,avail:$cur_avail,($min:$cur:$max).");
if ($cur_avail < $min) {
return([$name, "Parse reserved regions failed, not enough memory, available:$lparhash->{hyp_avail_mem}.", 1]);
}
@@ -1872,17 +1781,6 @@ sub deal_with_avail_mem {
return 0;
}
sub find_lpar_id {
my $request = shift;
my $parent = shift;
my $name = shift;
my %mapping = %{$request->{ppc}->{$parent}->{mapping}};
if (exists($mapping{$name})) {
return $mapping{$name};
}
return undef;
}
sub create_lpar {
my $request = shift;
my $name = shift;
@@ -1906,42 +1804,12 @@ sub create_lpar {
xCAT::FSPUtils::fsp_api_action($request, $name, $d, "part_set_lpar_group_id");
xCAT::FSPUtils::fsp_api_action($request, $name, $d, "part_set_lpar_avail_priority");
#print "======>physlots:$lparhash->{physlots}.\n";
if (exists($lparhash->{physlots})) {
$values = xCAT::FSPUtils::fsp_api_action($request, $name, $d, "set_io_slot_owner_uber", 0, $lparhash->{physlots});
#$values = xCAT::FSPUtils::fsp_api_action($request, $name, $d, "set_io_slot_owner", 0, join(",",@phy_io_array));
if (@$values[2] ne 0) {
&set_lpar_undefined($request, $name, $d);
return ([$name, @$values[1], @$values[2]]);
}
$values = xCAT::FSPUtils::fsp_api_action($request, $name, $d, "set_io_slot_owner_uber", 0, $lparhash->{physlots});
#$values = xCAT::FSPUtils::fsp_api_action($request, $name, $d, "set_io_slot_owner", 0, join(",",@phy_io_array));
if (@$values[2] ne 0) {
&set_lpar_undefined($request, $name, $d);
return ([$name, @$values[1], @$values[2]]);
}
if (exists($lparhash->{nics})) {
my @vlans = split /,/,$lparhash->{nics};
foreach (@vlans) {
if (/vlan(\d+)/i) {
my $vlanid = $1;
my $mac = lc(xCAT::VMCommon::genMac($name));
if ($mac =~ /(..):(..):(..):(..):(..):(..)/) {
my $tail = hex($6)+$vlanid;
$mac = sprintf("$1$2$3$4$5%02x",$tail);
}
$values = xCAT::FSPUtils::fsp_api_action($request,$name, $d, "part_set_veth_slot_config",0,"0,$vlanid,$mac");
if (@$values[2] ne 0) {
&set_lpar_undefined($request, $name, $d);
return ([$name, @$values[1], @$values[2]]);
}
}
}
}
if (exists($lparhash->{storage})) {
foreach my $v_info (@{$lparhash->{storage}}) {
$values = xCAT::FSPUtils::fsp_api_action($request,$name, $d, "part_set_vscsi_slot_config",0,$v_info);
if (@$values[2] ne 0) {
&set_lpar_undefined($request, $name, $d);
return ([$name, @$values[1], @$values[2]]);
}
}
}
# ====== ====== #
if (exists($lparhash->{phy_hea})) {
my $phy_hash = $lparhash->{phy_hea};
foreach my $phy_drc (keys %$phy_hash) {
@@ -1982,7 +1850,6 @@ sub create_lpar {
&set_lpar_undefined($request, $name, $d);
return ([$name, @$values[1], @$values[2]]);
}
xCAT::FSPUtils::fsp_api_action($request, $name, $d, "part_set_lpar_comp_modes");
#print "======>memory:$lparhash->{huge_page}.\n";
xCAT::FSPUtils::fsp_api_action($request, $name, $d, "set_huge_page", 0, $lparhash->{huge_page});
@@ -1999,6 +1866,7 @@ sub create_lpar {
}
return ([$name, "Done", 0]);
}
sub mkspeclpar {
my $request = shift;
my $hash = shift;
@@ -2012,7 +1880,7 @@ sub mkspeclpar {
while (my ($mtms, $h) = each (%$hash)) {
my $memhash;
my @nodes = keys(%$h);
my $ent = $vmtab->getNodesAttribs(\@nodes, ['cpus', 'memory','physlots', 'othersettings', 'storage', 'nics']);
my $ent = $vmtab->getNodesAttribs(\@nodes, ['cpus', 'memory','physlots', 'othersettings']);
while (my ($name, $d) = each (%$h)) {
if (@$d[4] ne 'lpar') {
push @result, [$name, "Node must be LPAR", 1];
@@ -2021,7 +1889,7 @@ sub mkspeclpar {
if (!exists($memhash->{run})) {
my @td = @$d;
@td[0] = 0;
$memhash = &query_cec_info_actions($request, $name, \@td, 1, ["part_get_hyp_process_and_mem","lpar_lhea_mac","part_get_all_io_bus_info"]);
$memhash = &query_cec_info_actions($request, $name, \@td, 1, ["part_get_hyp_process_and_mem","lpar_lhea_mac"]);
$memhash->{run} = 1;
}
my $tmp_ent = $ent->{$name}->[0];
@@ -2034,50 +1902,13 @@ sub mkspeclpar {
if (exists($opt->{vmphyslots})) {
$tmp_ent->{physlots} = $opt->{vmphyslots};
}
if (exists($opt->{vmothersetting})) {
$tmp_ent->{othersettings} = $opt->{vmothersetting};
}
if (exists($opt->{vmstorage})) {
$tmp_ent->{storage} = $opt->{vmstorage};
}
if (exists($opt->{vmnics})) {
$tmp_ent->{nics} = $opt->{vmnics};
}
if (exists($opt->{vios})) {
if (!exists($tmp_ent->{physlots})) {
my @phy_io_array = keys(%{$memhash->{bus}});
$tmp_ent->{physlots} = join(",", @phy_io_array);
}
if (exists($tmp_ent->{storage}) and $tmp_ent->{storage} !~ /^\d+$/) {
return ([[$name, "Parameter for 'vmstorage' is invalid", 1]]);
} elsif (exists($tmp_ent->{storage})) {
my $num = $tmp_ent->{storage};
my @array = ();
for (1..$num) {
push @array, '0';
}
$tmp_ent->{storage} = \@array;
}
} else {
if (exists($tmp_ent->{storage}) and $tmp_ent->{storage} !~ /^[\w_-]*:\d+$/) {
return ([[$name, "Parameter for 'vmstorage' is invalid", 1]]);
} elsif (exists($tmp_ent->{storage})) {
if ($tmp_ent->{storage} =~ /([\w_-]*):(\d+)/) {
my $vios = &find_lpar_id($request, @$d[3], $1);
my $r_slotid = $2;
if (!defined($vios)) {
return ([[$name, "Cannot find lparid for Server lpar:$1"]]);
}
$tmp_ent->{storage} = ["0,$vios,$r_slotid"];
}
}
}
if (!defined($tmp_ent) ) {
return ([[$name, "Not find params", 1]]);
#} elsif (!exists($tmp_ent->{cpus}) || !exists($tmp_ent->{memory}) || !exists($tmp_ent->{physlots})) {
} elsif (!exists($tmp_ent->{cpus}) || !exists($tmp_ent->{memory})) {
return ([[$name, "The attribute 'vmcpus', 'vmmemory' are needed to be specified.", 1]]);
} elsif (!exists($tmp_ent->{cpus}) || !exists($tmp_ent->{memory}) || !exists($tmp_ent->{physlots})) {
return ([[$name, "The attribute 'vmcpus', 'vmmemory' and 'vmphyslots' are all needed to be specified.", 1]]);
}
if ($tmp_ent->{memory} =~ /(\d+)([G|M]?)\/(\d+)([G|M]?)\/(\d+)([G|M]?)/i) {
my $memsize = $memhash->{mem_region_size};
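For context, the memory check above matches vmmemory values of the min/current/max form such as 1G/4G/8G. A standalone sketch of that parsing, normalized to megabytes (note the hunk's [G|M]? character class also matches a literal '|'; the sketch tightens it to [GM]?):

#!/usr/bin/perl
# Hypothetical sketch: parse "min/cur/max" memory specs such as "1G/4G/8G"
# or "512M/1024M/2048M" and normalize every value to megabytes.
use strict;
use warnings;

sub parse_mem_spec {
    my ($spec) = @_;
    return unless $spec =~ m{^(\d+)([GM]?)/(\d+)([GM]?)/(\d+)([GM]?)$}i;
    my @pairs = ([$1, $2], [$3, $4], [$5, $6]);
    # treat a bare number as megabytes, G as gibibytes
    return map { uc($_->[1]) eq 'G' ? $_->[0] * 1024 : $_->[0] } @pairs;
}

my ($min, $cur, $max) = parse_mem_spec('1G/4G/8G');   # 1024, 4096, 8192
print "min=$min cur=$cur max=$max MB\n";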
-2
@@ -56,7 +56,6 @@ $::STATUS_SHELL="shell";
$::STATUS_DEFINED="defined";
$::STATUS_UNKNOWN="unknown";
$::STATUS_FAILED="failed";
$::STATUS_BMCREADY="bmcready";
%::VALID_STATUS_VALUES = (
$::STATUS_ACTIVE=>1,
$::STATUS_INACTIVE=>1,
@@ -73,7 +72,6 @@ $::STATUS_BMCREADY="bmcready";
$::STATUS_DEFINED=>1,
$::STATUS_UNKNOWN=>1,
$::STATUS_FAILED=>1,
$::STATUS_BMCREADY=>1,
$::STATUS_SYNCING=>1,
$::STATUS_OUT_OF_SYNC=>1,
-10
@@ -1353,16 +1353,6 @@ sub dolitesetup
return 1;
}
# also copy $instrootloc/.statelite contents
$ccmd = "/usr/bin/cp -p -r $instrootloc/.statelite $SRloc";
$out = xCAT::Utils->runcmd("$ccmd", -1);
if ($::RUNCMD_RC != 0)
{
my $rsp;
push @{$rsp->{data}}, "Could not copy $instrootloc/.statelite to $SRloc.";
xCAT::MsgUtils->message("E", $rsp, $callback);
return 1;
}
}
}
}
+9 -51
@@ -827,7 +827,7 @@ sub get_mac_addr {
$done[0] = 0;
$cmd[0] = "\" local-mac-address\" ". $phandle . " get-package-property\r";
$msg[0] = "Status: return code and mac-address now on stack\n";
$pattern[0] = "local-mac-address.*ok";#"\s*3 >";
$pattern[0] = "ok";#"\s*3 >";
$newstate[0] = 1;
# cmd(1) is a dot (.). This is a stack manipulation command that removes one
@@ -1231,8 +1231,8 @@ sub ping_server{
$done[2] = 0;
$cmd[2] = "dev /packages/net\r";
$msg[2] = "Status: selected the /packages/net node as the active package\n";
$pattern[2] = ".*dev.*packages.*net(.*)ok(.*)0 >(.*)";
#$pattern[2] = "ok";
#$pattern[2] = ".*dev(.*)ok(.*)0 >(.*)";
$pattern[2] = "ok";
$newstate[2]= 3;
# state 3, ping the server
@@ -1266,7 +1266,6 @@ sub ping_server{
# state 5, all done
$done[5] = 1;
# for ping, only need to set speed and duplex for ethernet adapters
#
if ( $list_type eq "ent" ) {
@@ -1324,10 +1323,8 @@ sub ping_server{
$timeout = 300;
while ( $done[$state] eq 0 ) {
send_command($verbose, $rconsole, $cmd[$state]);
@result = $rconsole->expect(
$timeout,
[qr/$pattern[$state]/s=>
sub {
@@ -1365,9 +1362,7 @@ sub ping_server{
}
],
);
return 1 if ($rc eq 1);
return 1 if ($rc eq 1);
if ( $state eq 1 ) {
$adap_conn = $adap_conn_list[$j];
$cmd[1] = "\" ethernet,$adap_speed,$adap_conn,$adap_duplex\" encode-string \" chosen-network-type\" property\r";
@@ -2055,46 +2050,14 @@ sub multiple_open_dev {
; \r";
send_command($verbose, $rconsole, $command);
$timeout = 30;
$rconsole->expect(
$timeout,
[qr/new-open-dev(.*)ok/=>
#[qr/>/=>
sub {
nc_msg($verbose, "Status: at End of multiple_open_dev \n");
$rconsole->clear_accum();
}
],
[qr/]/=>
sub {
nc_msg($verbose, "Unexpected prompt\n");
$rconsole->clear_accum();
$rc = 1;
}
],
[timeout =>
sub {
send_user(2, "Timeout\n");
$rconsole->clear_accum();
$rc = 1;
}
],
[eof =>
sub {
send_user(2, "Cannot connect to $node\n");
$rconsole->clear_accum();
$rc = 1;
}
],
);
$command = "patch new-open-dev open-dev net-ping \r";
send_command($verbose, $rconsole, $command);
$timeout = 30;
$rconsole->expect(
$timeout,
[qr/patch new-open-dev(.*)ok/=>
#[qr/>/=>
#[qr/patch new-open-dev(.*)>/=>
[qr/>/=>
sub {
nc_msg($verbose, "Status: at End of multiple_open_dev \n");
$rconsole->clear_accum();
@@ -2123,7 +2086,6 @@ sub multiple_open_dev {
}
],
);
return $rc;
}
###################################################################
@@ -2607,7 +2569,7 @@ sub lparnetbootexp
####################################
nc_msg($verbose, "Connecting to the $node.\n");
sleep 3;
$timeout = 10;
$timeout = 2;
$rconsole->expect(
$timeout,
[ qr/Enter.* for help.*/i =>
@@ -2816,8 +2778,6 @@ sub lparnetbootexp
$done = 0;
$retry_count = 0;
$timeout = 10;
while (!$done) {
my @result = $rconsole->expect(
$timeout,
@@ -2925,7 +2885,6 @@ sub lparnetbootexp
}
}
##############################
# Call multiple_open_dev to
# circumvent firmware OPEN-DEV
@@ -2960,7 +2919,6 @@ sub lparnetbootexp
$match_pat = ".*";
}
if($colon) {
nc_msg($verbose, "#Type:Location_Code:MAC_Address:Full_Path_Name:Ping_Result:Device_Type:Size_MB:OS:OS_Version:\n");
$outputarrayindex++; # start from 1, 0 is used to set as 0
@@ -3014,7 +2972,7 @@ sub lparnetbootexp
} else {
for( $i = 0; $i < $adapter_found; $i++) {
if ($adap_type[$i] =~ /$match_pat/) {
if (!($adap_type[$i] eq "hfi-ent")) {
if ($adap_type[$i] eq "hfi-ent") {
$mac_address = get_mac_addr($phandle_array[$i], $rconsole, $node, $verbose);
$loc_code = get_adaptr_loc($phandle_array[$i], $rconsole, $node, $verbose);
}
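All of the expect() calls trimmed in this file use the Expect.pm pattern/callback style, so the behavioral changes come entirely from which regex is matched and how long the timeout is. A minimal standalone sketch of that style (the spawned console command is hypothetical):

#!/usr/bin/perl
# Hypothetical sketch of the Expect.pm pattern/callback style used above;
# the spawned console command is made up for illustration.
use strict;
use warnings;
use Expect;

my $exp = Expect->spawn('/usr/bin/firmware-console')   # hypothetical command
    or die "cannot spawn console: $!\n";
my $rc = 0;
my $timeout = 10;                     # seconds, as in the hunks above
$exp->expect($timeout,
    [ qr/ok/ => sub {                 # firmware acknowledged the command
          my $self = shift;
          $self->clear_accum();       # drop buffered output before continuing
      } ],
    [ timeout => sub { warn "Timeout\n";           $rc = 1; } ],
    [ eof     => sub { warn "Connection closed\n"; $rc = 1; } ],
);
exit $rc;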
+1 -1
@@ -149,7 +149,7 @@ sub nodesbycriteria {
}
if ($neednewcache) {
if ($nodelist) {
#$nodelist->_clear_cache();
$nodelist->_clear_cache();
$nodelist->_build_cache(\@cachedcolumns);
}
}
+1 -71
@@ -265,7 +265,6 @@ sub rackformat_to_numricformat{
values are attributes of a specific nic, like:
type : nic type
hostnamesuffix: hostname suffix
hostnameprefix: hostname prefix
customscript: custom script for this nic
network: network name for this nic
ip: ip address of this nic.
@@ -277,7 +276,7 @@ sub get_nodes_nic_attrs{
my $nodes = shift;
my $nicstab = xCAT::Table->new( 'nics');
my $entry = $nicstab->getNodesAttribs($nodes, ['nictypes', 'nichostnamesuffixes', 'nichostnameprefixes', 'niccustomscripts', 'nicnetworks', 'nicips']);
my $entry = $nicstab->getNodesAttribs($nodes, ['nictypes', 'nichostnamesuffixes', 'niccustomscripts', 'nicnetworks', 'nicips']);
my %nicsattrs;
my @nicattrslist;
@@ -309,20 +308,6 @@ sub get_nodes_nic_attrs{
}
}
if($entry->{$node}->[0]->{'nichostnameprefixes'}){
@nicattrslist = split(",", $entry->{$node}->[0]->{'nichostnameprefixes'});
foreach (@nicattrslist){
my @nicattrs;
if ($_ =~ /!/) {
@nicattrs = split("!", $_);
} else {
@nicattrs = split(":", $_);
}
$nicsattrs{$node}{$nicattrs[0]}{'hostnameprefix'} = $nicattrs[1];
}
}
if($entry->{$node}->[0]->{'niccustomscripts'}){
@nicattrslist = split(",", $entry->{$node}->[0]->{'niccustomscripts'});
foreach (@nicattrslist){
@@ -748,27 +733,6 @@ sub get_imageprofile_prov_method
#-------------------------------------------------------------------------------
=head3 get_imageprofile_prov_osvers
Description : Get a node's provisioning os version and profile from its imageprofile attribute.
Arguments : $imgprofilename - imageprofile name
Returns : node's osversion and profile
=cut
#-------------------------------------------------------------------------------
sub get_imageprofile_prov_osvers
{
my $class = shift;
my $imgprofilename = shift;
my $osimgtab = xCAT::Table->new('osimage');
my $osimgentry = ($osimgtab->getAllAttribsWhere("imagename = '$imgprofilename'", 'ALL' ))[0];
my $osversion = $osimgentry->{'osvers'};
my $profile = $osimgentry->{'profile'};
return ($osversion, $profile);
}
#-------------------------------------------------------------------------------
=head3 check_profile_consistent
Description : Check if the three profiles are consistent
Arguments : $imageprofile - image profile name
@@ -1036,40 +1000,6 @@ sub parse_nodeinfo_file
return 1, "";
}
#-------------------------------------------------------
=head3 update the table prodkey, in order to support windows
per node license key
Returns: $retcode.
$retcode = 1. update failed, the value is undef
$retcode = 0. save into db is OK..
=cut
#-------------------------------------------------------
sub update_windows_prodkey
{
my $class = shift;
my $node = shift;
my $product = shift;
my $key = shift;
unless(defined($node) && defined($product) && defined($key))
{
return 1;
}
# please notice this db usage
my %keyhash;
my %updates;
$keyhash{'node'} = $node;
$updates{'product'} = $product;
$updates{'key'} = $key;
my $tab = xCAT::Table->new('prodkey', -create=>1, -autocommit=>0);
$tab->setAttribs( \%keyhash,\%updates );
$tab->commit;
$tab->close;
return 0;
}
#-------------------------------------------------------------------------------
=head3 check_nicips
Description: Check if the nicips defined in MAC file is correct
Executable → Regular
+428 -589
File diff suppressed because it is too large.
Executable → Regular
+112 -276
@@ -134,7 +134,7 @@ litetree => {
table_desc => 'Directory hierarchy to traverse to get the initial contents of node files. The files that are specified in the litefile table are searched for in the directories specified in this table.',
descriptions => {
priority => 'This number controls what order the directories are searched. Directories are searched from smallest priority number to largest.',
image => "The name of the image (as specified in the osimage table) that will use this directory. You can also specify an image group name that is listed in the osimage.groups attribute of some osimages. 'ALL' means use this row for all images.",
image => "The name of the image that will use this directory, as specified in the osimage table. If image is not supplied, the default is 'ALL'. 'ALL' means use it for all images.",
directory => 'The location (hostname:path) of a directory that contains files specified in the litefile table. Variables are allowed. E.g: $noderes.nfsserver://xcatmasternode/install/$node/#CMD=uname-r#/',
mntopts => "A comma-separated list of options to use when mounting the litetree directory. (Ex. 'soft') The default is to do a 'hard' mount.",
comments => 'Any user-written notes.',
@@ -148,7 +148,7 @@ litefile => {
required => [qw(image file)], # default type is rw nfsroot
table_desc => 'The litefile table specifies the directories and files on the statelite nodes that should be readwrite, persistent, or readonly overlay. All other files in the statelite nodes come from the readonly statelite image.',
descriptions => {
image => "The name of the image (as specified in the osimage table) that will use these options on this dir/file. You can also specify an image group name that is listed in the osimage.groups attribute of some osimages. 'ALL' means use this row for all images.",
image => "The name of the image that will use these files, as specified in the osimage table. 'ALL' means use it for all images.",
file => "The full pathname of the file. e.g: /etc/hosts. If the path is a directory, then it should be terminated with a '/'. ",
options => "Options for the file:\n\n".
qq{ tmpfs - It is the default option if you leave the options column blank. It provides a file or directory for the node to use when booting; its permission will be the same as the original version on the server. In most cases, it is read-write; however, on the next statelite boot, the original version of the file or directory on the server will be used, meaning it is non-persistent. This option can be applied to files and directories.\n\n}.
@@ -200,7 +200,7 @@ vm => {
'mgr' => 'The function manager for the virtual machine',
'host' => 'The system that currently hosts the VM',
'migrationdest' => 'A noderange representing candidate destinations for migration (i.e. similar systems, same SAN, or other criteria that xCAT can use',
'storage' => 'A list of storage files or devices to be used. i.e. dir:///cluster/vm/<nodename> or nfs://<server>/path/to/folder/',
'storage' => 'A list of storage files or devices to be used. i.e. /cluster/vm/<nodename> or nfs://<server>/path/to/folder/',
'storagemodel' => 'Model of storage devices to provide to guest',
'cfgstore' => 'Optional location for persistent storage, separate from the emulated hard drives, for virtualization solutions that require a persistent store for configuration data',
'memory' => 'Megabytes of memory the VM currently should be set to.',
@@ -538,8 +538,8 @@ nodehm => {
table_desc => "Settings that control how each node's hardware is managed. Typically, an additional table that is specific to the hardware type of the node contains additional info. E.g. the ipmi, mp, and ppc tables.",
descriptions => {
node => 'The node name or group name.',
power => 'The method to use to control the power of the node. If not set, the mgt attribute will be used. Valid values: ipmi, blade, hmc, ivm, fsp, kvm, esx, rhevm. If "ipmi", xCAT will search for this node in the ipmi table for more info. If "blade", xCAT will search for this node in the mp table. If "hmc", "ivm", or "fsp", xCAT will search for this node in the ppc table.',
mgt => 'The method to use to do general hardware management of the node. This attribute is used as the default if power or getmac is not set. Valid values: ipmi, blade, hmc, ivm, fsp, bpa, kvm, esx, rhevm. See the power attribute for more details.',
power => 'The method to use to control the power of the node. If not set, the mgt attribute will be used. Valid values: ipmi, blade, hmc, ivm, fsp. If "ipmi", xCAT will search for this node in the ipmi table for more info. If "blade", xCAT will search for this node in the mp table. If "hmc", "ivm", or "fsp", xCAT will search for this node in the ppc table.',
mgt => 'The method to use to do general hardware management of the node. This attribute is used as the default if power or getmac is not set. Valid values: ipmi, blade, hmc, ivm, fsp, bpa. See the power attribute for more details.',
cons => 'The console method. If nodehm.serialport is set, this will default to the nodehm.mgt setting, otherwise it defaults to unused. Valid values: cyclades, mrv, or the values valid for the mgt attribute.',
termserver => 'The hostname of the terminal server.',
termport => 'The port number on the terminal server that this node is connected to.',
@@ -554,7 +554,7 @@ nodehm => {
},
},
nodelist => {
cols => [qw(node groups status statustime appstatus appstatustime primarysn hidden updatestatus updatestatustime zonename comments disable)],
cols => [qw(node groups status statustime appstatus appstatustime primarysn hidden updatestatus updatestatustime comments disable)],
keys => [qw(node)],
tablespace =>'XCATTBS32K',
table_desc => "The list of all the nodes in the cluster, including each node's current status and what groups it is in.",
@@ -569,7 +569,6 @@ nodelist => {
hidden => "Used to hide fsp and bpa definitions, 1 means not show them when running lsdef and nodels",
updatestatus => "The current node update status. Valid states are synced out-of-sync,syncing,failed.",
updatestatustime => "The date and time when the updatestatus was updated.",
zonename => "The name of the zone to which the node is currently assigned. If undefined, then it is not assigned to any zone. ",
comments => 'Any user-written notes.',
disable => "Set to 'yes' or '1' to comment out this row.",
},
@@ -592,7 +591,7 @@ nodepos => {
},
},
noderes => {
cols => [qw(node servicenode netboot tftpserver tftpdir nfsserver monserver nfsdir installnic primarynic discoverynics cmdinterface xcatmaster current_osimage next_osimage nimserver routenames nameservers proxydhcp comments disable)],
cols => [qw(node servicenode netboot tftpserver tftpdir nfsserver monserver nfsdir installnic primarynic discoverynics cmdinterface xcatmaster current_osimage next_osimage nimserver routenames nameservers comments disable)],
keys => [qw(node)],
tablespace =>'XCATTBS16K',
table_desc => 'Resources and settings to use when installing nodes.',
@@ -615,7 +614,6 @@ noderes => {
nimserver => 'Not used for now. The NIM server for this node (as known by this node).',
routenames => 'A comma separated list of route names that refer to rows in the routes table. These are the routes that should be defined on this node when it is deployed.',
nameservers => 'An optional node/group specific override for name server list. Most people want to stick to site or network defined nameserver configuration.',
proxydhcp => 'To specify whether the node supports proxydhcp protocol. Valid values: yes or 1, no or 0. Default value is yes.',
comments => 'Any user-written notes.',
disable => "Set to 'yes' or '1' to comment out this row.",
},
@@ -733,21 +731,6 @@ linuximage => {
disable => "Set to 'yes' or '1' to comment out this row.",
},
},
winimage => {
cols => [qw(imagename template installto partitionfile winpepath comments disable)],
keys => [qw(imagename)],
tablespace =>'XCATTBS32K',
table_desc => 'Information about a Windows operating system image that can be used to deploy cluster nodes.',
descriptions => {
imagename => 'The name of this xCAT OS image definition.',
template => 'The fully qualified name of the template file that is used to create the windows unattend.xml file for diskful installation.',
installto => 'The disk and partition that the Windows will be deployed to. The valid format is <disk>:<partition>. If not set, default value is 0:1 for bios boot mode(legacy) and 0:3 for uefi boot mode; If setting to 1, it means 1:1 for bios boot and 1:3 for uefi boot',
partitionfile => 'The path of partition configuration file. Since the partition configuration for bios boot mode and uefi boot mode are different, this configuration file should include two parts if customer wants to support both bios and uefi mode. If customer just wants to support one of the modes, specify one of them anyway. Example of partition configuration file: [BIOS]xxxxxxx[UEFI]yyyyyyy. To simplify the setting, you also can set installto in partitionfile with section likes [INSTALLTO]0:1',
winpepath => 'The path of winpe which will be used to boot this image. If the real path is /tftpboot/winboot/winpe1/, the value for winpepath should be set to winboot/winpe1',
comments => 'Any user-written notes.',
disable => "Set to 'yes' or '1' to comment out this row.",
}
},
passwd => {
cols => [qw(key username password cryptmethod authdomain comments disable)],
keys => [qw(key username)],
@@ -837,13 +820,13 @@ ppchcp => {
},
},
servicenode => {
cols => [qw(node nameserver dhcpserver tftpserver nfsserver conserver monserver ldapserver ntpserver ftpserver nimserver ipforward dhcpinterfaces proxydhcp comments disable)],
cols => [qw(node nameserver dhcpserver tftpserver nfsserver conserver monserver ldapserver ntpserver ftpserver nimserver ipforward dhcpinterfaces comments disable)],
keys => [qw(node)],
tablespace =>'XCATTBS16K',
table_desc => 'List of all Service Nodes and services that will be set up on the Service Node.',
descriptions => {
node => 'The hostname of the service node as known by the Management Node.',
nameserver => 'Do we set up DNS on this service node? Valid values: 2, 1, no or 0. If 2, creates named.conf as dns slave, using the management node as dns master, and starts named. If 1, creates named.conf file with forwarding to the management node and starts named. If no or 0, it does not change the current state of the service. ',
nameserver => 'Do we set up DNS on this service node? Valid values:yes or 1, no or 0. If yes, creates named.conf file with forwarding to the management node and starts named. If no or 0, it does not change the current state of the service. ',
dhcpserver => 'Do we set up DHCP on this service node? Not supported on AIX. Valid values:yes or 1, no or 0. If yes, runs makedhcp -n. If no or 0, it does not change the current state of the service. ',
tftpserver => 'Do we set up TFTP on this service node? Not supported on AIX. Valid values:yes or 1, no or 0. If yes, configures and starts atftp. If no or 0, it does not change the current state of the service. ',
nfsserver => 'Do we set up file services (HTTP,FTP,or NFS) on this service node? For AIX will only setup NFS, not HTTP or FTP. Valid values:yes or 1, no or 0.If no or 0, it does not change the current state of the service. ',
@@ -855,7 +838,6 @@ servicenode => {
nimserver => 'Not used. Do we set up a NIM server on this service node? Valid values:yes or 1, no or 0. If no or 0, it does not change the current state of the service.',
ipforward => 'Do we set up ip forwarding on this service node? Valid values:yes or 1, no or 0. If no or 0, it does not change the current state of the service.',
dhcpinterfaces => 'The network interfaces DHCP server should listen on for the target node. This attribute can be used for management node and service nodes. If defined, it will override the values defined in site.dhcpinterfaces. This is a comma separated list of device names. !remote! indicates a non-local network for relay DHCP. For example: !remote!,eth0,eth1',
proxydhcp => 'Do we set up proxydhcp service on this node? valid values: yes or 1, no or 0. If yes, the proxydhcp daemon will be enabled on this node.',
comments => 'Any user-written notes.',
disable => "Set to 'yes' or '1' to comment out this row.",
@@ -864,44 +846,35 @@ servicenode => {
site => {
cols => [qw(key value comments disable)],
keys => [qw(key)],
table_desc => "Global settings for the whole cluster. This table is different from the \nother tables in that each attribute is just named in the key column, rather \nthan having a separate column for each attribute. The following is a list of \nattributes currently used by xCAT organized into categories.\n",
table_desc => "Global settings for the whole cluster. This table is different from the \nother tables in that each attribute is just named in the key column, rather \nthan having a separate column for each attribute. The following is a list of \nthe attributes currently used by xCAT.\n",
descriptions => {
# Do not put description text past column 88, so it displays well in a 100 char wide window.
# ----------------------------------------------------------------------------------|----------
key => "Attribute Name: Description\n\n".
" ------------\n".
"AIX ATTRIBUTES\n".
" ------------\n".
" nimprime : The name of NIM server, if not set default is the AIX MN.
If Linux MN, then must be set for support of mixed cluster (TBD).\n\n".
" useSSHonAIX: (yes/1 or no/0). Default is yes. The support for rsh/rcp is deprecated.\n".
" useNFSv4onAIX: (yes/1 or no/0). If yes, NFSv4 will be used with NIM. If no,\n".
" NFSv3 will be used with NIM. Default is no.\n\n".
" -----------------\n".
"DATABASE ATTRIBUTES\n".
" -----------------\n".
" auditskipcmds: List of commands and/or client types that will not be\n".
" written to the auditlog table.\n".
" auditskipcmds: List of commands and/or client types that will not be written to the auditlog table.\n".
" 'ALL' means all cmds will be skipped. If attribute is null, all\n".
" commands will be written.\n".
" clienttype:web would skip all commands from the web client\n".
" For example: tabdump,nodels,clienttype:web \n".
" will not log tabdump,nodels and any web client commands.\n\n".
" blademaxp: The maximum number of concurrent processes for blade hardware control.\n\n".
" cleanupxcatpost: (yes/1 or no/0). Set to 'yes' or '1' to clean up the /xcatpost\n".
" directory on the stateless and statelite nodes after the\n".
" postscripts are run. Default is no.\n\n".
" consoleondemand: When set to 'yes', conserver connects and creates the console\n".
" output only when the user opens the console. Default is no on\n".
" Linux, yes on AIX.\n\n".
" databaseloc: Directory where we create the db instance directory.\n".
" Default is /var/lib. Only DB2 is currently supported.\n".
" Do not use the directory in the site.installloc or\n".
" installdir attribute. This attribute must not be changed\n".
" once db2sqlsetup script has been run and DB2 has been setup.\n\n".
" excludenodes: A set of comma separated nodes and/or groups that would automatically\n".
" be subtracted from any noderange, it can be used for excluding some\n".
" failed nodes for any xCAT commands. See the 'noderange' manpage for\n".
" details on supported formats.\n\n".
" nodestatus: If set to 'n', the nodelist.status column will not be updated during\n".
" the node deployment, node discovery and power operations. The default is to update.\n\n".
" skiptables: Comma separated list of tables to be skipped by dumpxCATdb\n\n".
" -------------\n".
"DHCP ATTRIBUTES\n".
" -------------\n".
" db2installloc: The location which the service nodes should mount for\n".
" the db2 code to install. Format is hostname:/path. If hostname is\n".
" omitted, it defaults to the management node. Default is /mntdb2.\n\n".
" defserialflow: The default serial flow - currently only used by the mknb command.\n\n".
" defserialport: The default serial port - currently only used by mknb.\n\n".
" defserialspeed: The default serial speed - currently only used by mknb.\n\n".
" dhcpinterfaces: The network interfaces DHCP should listen on. If it is the same\n".
" for all nodes, use a simple comma-separated list of NICs. To\n".
" specify different NICs for different nodes:\n".
@@ -914,19 +887,59 @@ site => {
" disjointdhcps: If set to '1', the .leases file on a service node only contains\n".
" the nodes it manages. The default value is '0'.\n".
" '0' value means include all the nodes in the subnet.\n\n".
" pruneservices: Whether to enable service pruning when noderm is run (i.e.\n".
" removing DHCP entries when noderm is executed)\n\n".
" ------------\n".
"DNS ATTRIBUTES\n".
" ------------\n".
" dnshandler: Name of plugin that handles DNS setup for makedns.\n".
" domain: The DNS domain name used for the cluster.\n\n".
" ea_primary_hmc: The hostname of the HMC that the Integrated Switch Network\n".
" Management Event Analysis should send hardware serviceable\n".
" events to for processing and potentially sending to IBM.\n\n".
" ea_backup_hmc: The hostname of the HMC that the Integrated Switch Network\n".
" Management Event Analysis should send hardware serviceable\n".
" events to if the primary HMC is down.\n\n".
" enableASMI: (yes/1 or no/0). If yes, ASMI method will be used after fsp-api. If no,\n".
" when fsp-api is used, ASMI method will not be used. Default is no.\n\n".
" excludenodes: A set of comma separated nodes and/or groups that would automatically\n".
" be subtracted from any noderange, it can be used for excluding some\n".
" failed nodes for any xCAT commands. See the 'noderange' manpage for\n".
" details on supported formats.\n\n".
" forwarders: The DNS servers at your site that can provide names outside of the\n".
" cluster. The makedns command will configure the DNS on the management\n".
" node to forward requests it does not know to these servers.\n".
" Note that the DNS servers on the service nodes will ignore this value\n".
" and always be configured to forward requests to the management node.\n\n".
" fsptimeout: The timeout, in milliseconds, to use when communicating with FSPs.\n\n".
" genmacprefix: When generating mac addresses automatically, use this manufacturing\n".
" prefix (e.g. 00:11:aa)\n\n".
" genpasswords: Automatically generate random passwords for BMCs when configuring\n".
" them.\n\n".
" httpport: The port number that the booting/installing nodes should contact the\n".
" http server on the MN/SN on. It is your responsibility to configure\n".
" the http server to listen on that port - xCAT will not do that.\n\n".
" installdir: The local directory name used to hold the node deployment packages.\n\n".
" installloc: The location from which the service nodes should mount the \n".
" deployment packages in the format hostname:/path. If hostname is\n".
" omitted, it defaults to the management node. The path must\n".
" match the path in the installdir attribute.\n\n".
" ipmidispatch: Whether or not to send ipmi hw control operations to the service\n".
" node of the target compute nodes. Default is 'y'.\n\n".
" hwctrldispatch: Whether or not to send hw control operations to the service\n".
" node of the target nodes. Default is 'y'.(At present, this attribute\n".
" is only used for IBM Flex System)\n\n".
" ipmimaxp: The max # of processes for ipmi hw ctrl. The default is 64. Currently,\n".
" this is only used for HP hw control.\n\n".
" ipmiretries: The # of retries to use when communicating with BMCs. Default is 3.\n\n".
" ipmisdrcache: If set to 'no', then the xCAT IPMI support will not cache locally\n".
" the target node's SDR cache to improve performance.\n\n".
" ipmitimeout: The timeout to use when communicating with BMCs. Default is 2.\n".
" This attribute is currently not used.\n\n".
" iscsidir: The path to put the iscsi disks in on the mgmt node.\n\n".
" master: The hostname of the xCAT management node, as known by the nodes.\n\n".
" maxssh: The max # of SSH connections at any one time to the hw ctrl point for PPC\n".
" This parameter doesn't take effect on the rpower command.\n".
" It takes effects on other PPC hardware control command\n".
" getmacs/rnetboot/rbootseq and so on. Default is 8.\n\n".
" mnroutenames: The name of the routes to be setup on the management node.\n".
" It is a comma separated list of route names that are defined in the\n".
" routes table.\n\n".
" nameservers: A comma delimited list of DNS servers that each node in the cluster\n".
" should use. This value will end up in the nameserver settings of the\n".
" /etc/resolv.conf on each node. It is common (but not required) to set\n".
@@ -936,35 +949,18 @@ site => {
" \"<xcatmaster>\" to mean the DNS server for each node should be the\n".
" node that is managing it (either its service node or the management\n".
" node).\n\n".
" -------------------------\n".
"HARDWARE CONTROL ATTRIBUTES\n".
" -------------------------\n".
" blademaxp: The maximum number of concurrent processes for blade hardware control.\n\n".
" ea_primary_hmc: The hostname of the HMC that the Integrated Switch Network\n".
" Management Event Analysis should send hardware serviceable\n".
" events to for processing and potentially sending to IBM.\n\n".
" ea_backup_hmc: The hostname of the HMC that the Integrated Switch Network\n".
" Management Event Analysis should send hardware serviceable\n".
" events to if the primary HMC is down.\n\n".
" enableASMI: (yes/1 or no/0). If yes, ASMI method will be used after fsp-api. If no,\n".
" when fsp-api is used, ASMI method will not be used. Default is no.\n\n".
" fsptimeout: The timeout, in milliseconds, to use when communicating with FSPs.\n\n".
" hwctrldispatch: Whether or not to send hw control operations to the service\n".
" node of the target nodes. Default is 'y'.(At present, this attribute\n".
" is only used for IBM Flex System)\n\n".
" ipmidispatch: Whether or not to send ipmi hw control operations to the service\n".
" node of the target compute nodes. Default is 'y'.\n\n".
" ipmimaxp: The max # of processes for ipmi hw ctrl. The default is 64. Currently,\n".
" this is only used for HP hw control.\n\n".
" ipmiretries: The # of retries to use when communicating with BMCs. Default is 3.\n\n".
" ipmisdrcache: If set to 'no', then the xCAT IPMI support will not cache locally\n".
" the target node's SDR cache to improve performance.\n\n".
" ipmitimeout: The timeout to use when communicating with BMCs. Default is 2.\n".
" This attribute is currently not used.\n\n".
" maxssh: The max # of SSH connections at any one time to the hw ctrl point for PPC\n".
" This parameter doesn't take effect on the rpower command.\n".
" It takes effects on other PPC hardware control command\n".
" getmacs/rnetboot/rbootseq and so on. Default is 8.\n\n".
" nimprime : The name of NIM server, if not set default is the AIX MN.
If Linux MN, then must be set for support of mixed cluster (TBD).\n\n".
" nodestatus: If set to 'n', the nodelist.status column will not be updated during\n".
" the node deployment, node discovery and power operations. The default is to update.\n\n".
" ntpservers: A comma delimited list of NTP servers for the cluster - often the\n".
" xCAT management node.\n\n".
" runbootscripts: If set to 'yes' the scripts listed in the postbootscripts\n".
" attribute in the osimage and postscripts tables will be run during\n".
" each reboot of stateful (diskful) nodes. This attribute has no\n".
" effect on stateless and statelite nodes. Please run the following\n" .
" command after you change the value of this attribute: \n".
" 'updatenode <nodes> -P setuppostbootscripts'\n\n".
" syspowerinterval: For system p CECs, this is the number of seconds the rpower\n".
" command will wait between performing the action for each CEC.\n".
" For system x IPMI servers, this is the number of seconds the\n".
@@ -991,45 +987,15 @@ site => {
" ppctimeout: The timeout, in milliseconds, to use when communicating with PPC hw\n".
" through HMC. It only takes effect on the hardware control commands\n".
" through HMC. Default is 0.\n\n".
" snmpc: The snmp community string that xcat should use when communicating with the\n".
" switches.\n\n".
" ---------------------------\n".
"INSTALL/DEPLOYMENT ATTRIBUTES\n".
" ---------------------------\n".
" cleanupxcatpost: (yes/1 or no/0). Set to 'yes' or '1' to clean up the /xcatpost\n".
" directory on the stateless and statelite nodes after the\n".
" postscripts are run. Default is no.\n\n".
" db2installloc: The location which the service nodes should mount for\n".
" the db2 code to install. Format is hostname:/path. If hostname is\n".
" omitted, it defaults to the management node. Default is /mntdb2.\n\n".
" defserialflow: The default serial flow - currently only used by the mknb command.\n\n".
" defserialport: The default serial port - currently only used by mknb.\n\n".
" defserialspeed: The default serial speed - currently only used by mknb.\n\n".
" genmacprefix: When generating mac addresses automatically, use this manufacturing\n".
" prefix (e.g. 00:11:aa)\n\n".
" genpasswords: Automatically generate random passwords for BMCs when configuring\n".
" them.\n\n".
" installdir: The local directory name used to hold the node deployment packages.\n\n".
" installloc: The location from which the service nodes should mount the \n".
" deployment packages in the format hostname:/path. If hostname is\n".
" omitted, it defaults to the management node. The path must\n".
" match the path in the installdir attribute.\n\n".
" iscsidir: The path to put the iscsi disks in on the mgmt node.\n\n".
" mnroutenames: The name of the routes to be setup on the management node.\n".
" It is a comma separated list of route names that are defined in the\n".
" routes table.\n\n".
" runbootscripts: If set to 'yes' the scripts listed in the postbootscripts\n".
" attribute in the osimage and postscripts tables will be run during\n".
" each reboot of stateful (diskful) nodes. This attribute has no\n".
" effect on stateless and statelite nodes. Please run the following\n" .
" command after you change the value of this attribute: \n".
" 'updatenode <nodes> -P setuppostbootscripts'\n\n".
" precreatemypostscripts: (yes/1 or no/0). Default is no. If yes, it will \n".
" instruct xCAT at nodeset and updatenode time to query the db once for\n".
" all of the nodes passed into the cmd and create the mypostscript file\n".
" for each node, and put them in a directory of tftpdir(such as: /tftpboot)\n".
" If no, it will not generate the mypostscript file in the tftpdir.\n\n".
" setinstallnic: Set the network configuration for installnic to be static.\n\n".
" pruneservices: Whether to enable service pruning when noderm is run (i.e.\n".
" removing DHCP entries when noderm is executed)\n\n".
" rsh: This is no longer used. path to remote shell command for xdsh.\n\n".
" rcp: This is no longer used. path to remote copy command for xdcp.\n\n".
" sharedtftp: Set to 0 or no, xCAT should not assume the directory\n".
" in tftpdir is mounted on all on Service Nodes. Default is 1/yes.\n".
" If value is set to a hostname, the directory in tftpdir\n".
@@ -1040,30 +1006,18 @@ site => {
" shared filesystem is being used across all service nodes.\n".
" 'all' means that the management as well as the service nodes\n".
" are all using a common shared filesystem. The default is 'no'.\n".
" xcatconfdir: Where xCAT config data is (default /etc/xcat).\n\n".
" --------------------\n".
"REMOTESHELL ATTRIBUTES\n".
" --------------------\n".
" nodesyncfiledir: The directory on the node, where xdcp will rsync the files\n".
" skiptables: Comma separated list of tables to be skipped by dumpxCATdb\n".
" SNsyncfiledir: The directory on the Service Node, where xdcp will rsync the files\n".
" from the MN that will eventually be rsync'd to the compute nodes.\n\n".
" sshbetweennodes: Comma separated list of groups of compute nodes to enable passwordless root \n".
" nodesyncfiledir: The directory on the node, where xdcp will rsync the files\n".
" snmpc: The snmp community string that xcat should use when communicating with the\n".
" switches.\n\n".
" sshbetweennodes: Comma separated list of groups to enable passwordless root \n".
" ssh during install, or xdsh -K. Default is ALLGROUPS.\n".
" Set to NOGROUPS,if you do not wish to enabled any group of compute nodes.\n".
" Set to NOGROUPS,if you do not wish to enabled any groups.\n".
" Service Nodes are not affected by this attribute\n".
" they are always setup with\n".
" passwordless root access to nodes and other SN.\n\n".
" -----------------\n".
"SERVICES ATTRIBUTES\n".
" -----------------\n".
" consoleondemand: When set to 'yes', conserver connects and creates the console\n".
" output only when the user opens the console. Default is no on\n".
" Linux, yes on AIX.\n\n".
" httpport: The port number that the booting/installing nodes should contact the\n".
" http server on the MN/SN on. It is your responsibility to configure\n".
" the http server to listen on that port - xCAT will not do that.\n\n".
" ntpservers: A comma delimited list of NTP servers for the cluster - often the\n".
" xCAT management node.\n\n".
" svloglocal: if set to 1, syslog on the service node will not get forwarded to the\n".
" mgmt node.\n\n".
" timezone: (e.g. America/New_York)\n\n".
@@ -1073,26 +1027,11 @@ site => {
" useNmapfromMN: When set to yes, nodestat command should obtain the node status\n".
" using nmap (if available) from the management node instead of the\n".
" service node. This will improve the performance in a flat network.\n\n".
" vsftp: Default is 'n'. If set to 'y', the xcatd on the mn will automatically\n".
" bring up vsftpd. (You must manually install vsftpd before this.\n".
" This setting does not apply to the service node. For sn\n".
" you need to set servicenode.ftpserver=1 if you want xcatd to\n".
" bring up vsftpd.\n\n".
" -----------------------\n".
"VIRTUALIZATION ATTRIBUTES\n".
" -----------------------\n".
" useSSHonAIX: (yes/1 or no/0). If yes, ssh/scp will be setup and used. If no, rsh/rcp. The support for rsh/rcp is deprecated.\n".
" usexhrm: Have xCAT run its xHRM script when booting up KVM guests to set the\n".
" virtual network bridge up correctly. See\n".
" https://sourceforge.net/apps/mediawiki/xcat/index.php?title=XCAT_Virtualization_with_KVM#Setting_up_a_network_bridge\n\n".
" vcenterautojoin: When set to no, the VMWare plugin will not attempt to auto remove\n".
" and add hypervisors while trying to perform operations. If users\n".
" or tasks outside of xCAT perform the joining this assures xCAT\n".
" will not interfere.\n\n".
" vmwarereconfigonpower: When set to no, the VMWare plugin will make no effort to\n".
" push vm.cpus/vm.memory updates from xCAT to VMWare.\n\n".
" --------------------\n".
"XCAT DAEMON ATTRIBUTES\n".
" --------------------\n".
" rsh/rcp will be setup and used on AIX. Default is yes.\n\n".
" useflowcontrol: (yes/1 or no/0). If yes, the postscript processing on each node\n".
" contacts xcatd on the MN/SN using a lightweight UDP packet to wait\n".
" until xcatd is ready to handle the requests associated with\n".
@@ -1103,14 +1042,28 @@ site => {
" xcatd, and retry. On a new install of xcat, this value will be set to yes.\n".
" See the following document for details:\n".
" https://sourceforge.net/apps/mediawiki/xcat/index.php?title=Hints_and_Tips_for_Large_Scale_Clusters\n\n".
" useNFSv4onAIX: (yes/1 or no/0). If yes, NFSv4 will be used with NIM. If no,\n".
" NFSv3 will be used with NIM. Default is no.\n\n".
" vcenterautojoin: When set to no, the VMWare plugin will not attempt to auto remove\n".
" and add hypervisors while trying to perform operations. If users\n".
" or tasks outside of xCAT perform the joining this assures xCAT\n".
" will not interfere.\n\n".
" vmwarereconfigonpower: When set to no, the VMWare plugin will make no effort to\n".
" push vm.cpus/vm.memory updates from xCAT to VMWare.\n\n".
" vsftp: Default is 'n'. If set to 'y', the xcatd on the mn will automatically\n".
" bring up vsftpd. (You must manually install vsftpd before this.\n".
" This setting does not apply to the service node. For sn\n".
" you need to set servicenode.ftpserver=1 if you want xcatd to\n".
" bring up vsftpd.\n\n".
" xcatconfdir: Where xCAT config data is (default /etc/xcat).\n\n".
" xcatmaxconnections: Number of concurrent xCAT protocol requests before requests\n".
" begin queueing. This applies to both client command requests\n".
" and node requests, e.g. to get postscripts. Default is 64.\n\n".
" xcatmaxbatchconnections: Number of concurrent xCAT connections allowed from the nodes.\n".
" Value must be less than xcatmaxconnections. Default is 50.\n\n".
" xcatdport: The port used by the xcatd daemon for client/server communication.\n\n".
" xcatiport: The port used by xcatd to receive install status updates from nodes.\n\n".
" xcatsslversion: The ssl version by xcatd. Default is SSLv3.\n\n".
" xcatiport: The port used by xcatd to receive install status updates from nodes.\n\n",
" xcatsslversion: The ssl version by xcatd. Default is SSLv3.\n\n",
" xcatsslciphers: The ssl cipher by xcatd. Default is 3DES.\n\n",
value => 'The value of the attribute specified in the "key" column.',
comments => 'Any user-written notes.',
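For reference, a minimal sketch (not part of this diff) of reading one of the site attributes described above through the xCAT::Table accessor this tabspec drives; it assumes a configured management node where the /etc/xcat databases exist:

#!/usr/bin/env perl
use strict;
use warnings;
use xCAT::Table;

# The site table is keyed on 'key'; each row carries the setting in 'value'.
my $sitetab = xCAT::Table->new('site');
die "could not open the site table\n" unless $sitetab;
my $ent = $sitetab->getAttribs({key => 'xcatmaxconnections'}, 'value');
print 'xcatmaxconnections = ',
      (defined $ent && defined $ent->{value}) ? $ent->{value} : '<unset, default 64>',
      "\n";
$sitetab->close();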
@@ -1190,18 +1143,6 @@ performance => {
disable => "Set to 'yes' or '1' to comment out this row.",
},
},
zone => {
cols => [qw(zonename sshkeydir defaultzone comments disable)],
keys => [qw(zonename)],
table_desc => 'Defines a cluster zone for nodes that share root ssh key access to each other.',
descriptions => {
zonename => 'The name of the zone.',
sshkeydir => 'Directory containing the shared root ssh RSA keys.',
defaultzone => 'If set to yes or 1, nodes that are not assigned to any other zone will default to this zone.',
comments => 'Any user-provided notes.',
disable => "Set to 'yes' or '1' to comment out this row.",
},
},
eventlog => {
cols => [qw(recid eventtime eventtype monitor monnode node application component id severity message rawdata comments disable)],
@@ -1338,7 +1279,7 @@ firmware => {
},
nics => {
cols => [qw(node nicips nichostnamesuffixes nichostnameprefixes nictypes niccustomscripts nicnetworks nicaliases comments disable)],
cols => [qw(node nicips nichostnamesuffixes nictypes niccustomscripts nicnetworks nicaliases comments disable)],
keys => [qw(node)],
tablespace =>'XCATTBS16K',
table_desc => 'Stores NIC details.',
@@ -1356,13 +1297,6 @@ nics => {
<nic1>!<ext1>|<ext2>,<nic2>!<ext1>|<ext2>,..., for example, eth0!-eth0|-eth0-ipv6,ib0!-ib0|-ib0-ipv6.
The xCAT object definition commands support using nichostnamesuffixes.<nicname> as sub-attributes.
Note: According to DNS rules a hostname must be a text string of up to 24 characters drawn from the alphabet (A-Z), digits (0-9), minus sign (-), and period (.). When you are specifying "nichostnamesuffixes" or "nicaliases", make sure the resulting hostnames conform to this naming convention',
nichostnameprefixes => 'Comma-separated list of hostname prefixes per NIC.
If only one ip address is associated with each NIC:
<nic1>!<ext1>,<nic2>!<ext2>,..., for example, eth0!eth0-,ib0!ib-
If multiple ip addresses are associated with each NIC:
<nic1>!<ext1>|<ext2>,<nic2>!<ext1>|<ext2>,..., for example, eth0!eth0-|eth0-ipv6-,ib0!ib-|ib-ipv6-.
The xCAT object definition commands support using nichostnameprefixes.<nicname> as sub-attributes.
Note: According to DNS rules a hostname must be a text string of up to 24 characters drawn from the alphabet (A-Z), digits (0-9), minus sign (-), and period (.). When you are specifying "nichostnameprefixes" or "nicaliases", make sure the resulting hostnames conform to this naming convention',
nictypes => 'Comma-separated list of NIC types per NIC. <nic1>!<type1>,<nic2>!<type2>, e.g. eth0!Ethernet,ib0!Infiniband. The xCAT object definition commands support to use nictypes.<nicname> as the sub attributes.',
niccustomscripts => 'Comma-separated list of custom scripts per NIC. <nic1>!<script1>,<nic2>!<script2>, e.g. eth0!configeth eth0, ib0!configib ib0. The xCAT object definition commands support using niccustomscripts.<nicname> as the sub-attribute.',
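A hedged sketch (illustrative, not the xCAT implementation) of how the <nic>!<ext1>|<ext2> syntax described above decomposes, using a nichostnamesuffixes-style value and a hypothetical node name 'node1':

use strict;
use warnings;
# Parse per-NIC suffix lists and print the hostnames 'node1' would get.
my $spec = 'eth0!-eth0|-eth0-ipv6,ib0!-ib0|-ib0-ipv6';
my %suffixes;
for my $pair (split /,/, $spec) {
    my ($nic, $exts) = split /!/, $pair, 2;
    $suffixes{$nic} = [ split /\|/, $exts ];
}
for my $nic (sort keys %suffixes) {
    print "node1$_ ($nic)\n" for @{ $suffixes{$nic} };
}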
@@ -1547,20 +1481,8 @@ mic => {
disable => "Do not use. tabprune will not work if set to yes or 1",
},
},
hwinv => {
cols => [qw(node cputype cpucount memory disksize comments disable)],
keys => [qw(node)],
table_desc => 'The hardware inventory for the node.',
descriptions => {
node => 'The node name or group name.',
cputype => 'The cpu model name for the node.',
cpucount => 'The number of cpus for the node.',
memory => 'The size of the memory for the node.',
disksize => 'The size of the disks for the node.',
comments => 'Any user-provided notes.',
disable => "Set to 'yes' or '1' to comment out this row.",
},
},
); # end of tabspec definition
@@ -1638,7 +1560,6 @@ foreach my $tabname (keys(%xCAT::ExtTab::ext_tabspec)) {
rack => { attrs => [], attrhash => {}, objkey => 'rackname' },
osdistro=> { attrs => [], attrhash => {}, objkey => 'osdistroname' },
osdistroupdate=> { attrs => [], attrhash => {}, objkey => 'osupdatename' },
zone=> { attrs => [], attrhash => {}, objkey => 'zonename' },
);
@@ -1707,11 +1628,6 @@ my @nodeattrs = (
tabentry => 'noderes.monserver',
access_tabentry => 'noderes.node=attr:node',
},
{attr_name => 'supportproxydhcp',
tabentry => 'noderes.proxydhcp',
access_tabentry => 'noderes.node=attr:node',
},
{attr_name => 'kernel',
tabentry => 'bootparams.kernel',
access_tabentry => 'bootparams.node=attr:node',
@@ -1780,10 +1696,6 @@ my @nodeattrs = (
{attr_name => 'setupipforward',
tabentry => 'servicenode.ipforward',
access_tabentry => 'servicenode.node=attr:node',
},
{attr_name => 'setupproxydhcp',
tabentry => 'servicenode.proxydhcp',
access_tabentry => 'servicenode.node=attr:node',
},
# - monserver not used yet
# {attr_name => 'setupmonserver',
@@ -2265,10 +2177,6 @@ my @nodeattrs = (
tabentry => 'nics.nichostnamesuffixes',
access_tabentry => 'nics.node=attr:node',
},
{attr_name => 'nichostnameprefixes',
tabentry => 'nics.nichostnameprefixes',
access_tabentry => 'nics.node=attr:node',
},
{attr_name => 'nictypes',
tabentry => 'nics.nictypes',
access_tabentry => 'nics.node=attr:node',
@@ -2544,25 +2452,6 @@ my @nodeattrs = (
tabentry => 'mic.powermgt',
access_tabentry => 'mic.node=attr:node',
},
#####################
## hwinv table #
#####################
{attr_name => 'cputype',
tabentry => 'hwinv.cputype',
access_tabentry => 'hwinv.node=attr:node',
},
{attr_name => 'cpucount',
tabentry => 'hwinv.cpucount',
access_tabentry => 'hwinv.node=attr:node',
},
{attr_name => 'memory',
tabentry => 'hwinv.memory',
access_tabentry => 'hwinv.node=attr:node',
},
{attr_name => 'disksize',
tabentry => 'hwinv.disksize',
access_tabentry => 'hwinv.node=attr:node',
},
); # end of @nodeattrs that applies to both nodes and groups
@@ -2613,10 +2502,6 @@ my @nodeattrs = (
{attr_name => 'updatestatustime',
tabentry => 'nodelist.updatestatustime',
access_tabentry => 'nodelist.node=attr:node',
},
{attr_name => 'zonename',
tabentry => 'nodelist.zonename',
access_tabentry => 'nodelist.node=attr:node',
},
{attr_name => 'usercomment',
tabentry => 'nodelist.comments',
@@ -2822,29 +2707,6 @@ push(@{$defspec{node}->{'attrs'}}, @nodeattrs);
access_tabentry => 'linuximage.imagename=attr:imagename',
},
####################
# winimage table#
####################
{attr_name => 'template',
only_if => 'imagetype=windows',
tabentry => 'winimage.template',
access_tabentry => 'winimage.imagename=attr:imagename',
},
{attr_name => 'installto',
only_if => 'imagetype=windows',
tabentry => 'winimage.installto',
access_tabentry => 'winimage.imagename=attr:imagename',
},
{attr_name => 'partitionfile',
only_if => 'imagetype=windows',
tabentry => 'winimage.partitionfile',
access_tabentry => 'winimage.imagename=attr:imagename',
},
{attr_name => 'winpepath',
only_if => 'imagetype=windows',
tabentry => 'winimage.winpepath',
access_tabentry => 'winimage.imagename=attr:imagename',
},
####################
# nimimage table#
####################
{attr_name => 'nimtype',
@@ -3071,32 +2933,6 @@ push(@{$defspec{node}->{'attrs'}}, @nodeattrs);
access_tabentry => 'rack.rackname=attr:rackname',
},
);
####################
# zone table #
####################
@{$defspec{zone}->{'attrs'}} = (
{attr_name => 'zonename',
tabentry => 'zone.zonename',
access_tabentry => 'zone.zonename=attr:zonename',
},
{attr_name => 'sshkeydir',
tabentry => 'zone.sshkeydir',
access_tabentry => 'zone.zonename=attr:zonename',
},
{attr_name => 'defaultzone',
tabentry => 'zone.defaultzone',
access_tabentry => 'zone.zonename=attr:zonename',
},
{attr_name => 'usercomment',
tabentry => 'zone.comments',
access_tabentry => 'zone.zonename=attr:zonename',
},
);
#########################
# route data object #
#########################
# routes table #
#########################
#########################
# route data object #
#########################
-2
View File
@@ -163,8 +163,6 @@ sub isServiceReq
if (($value eq "1") || ($value eq "YES"))
{
$servicehash->{$service} = "1";
} elsif ($value eq "2") {
$servicehash->{$service} = "2";
} else {
$servicehash->{$service} = "0";
}
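For context, a minimal sketch (illustrative values, not from the table) of the normalization that remains after this change: only yes/1 map to '1', and the special '2' state removed above now falls through to '0':

use strict;
use warnings;
my %servicehash;
for my $svc ([dhcpserver => 'YES'], [tftpserver => '0'], [nameserver => '2']) {
    my ($service, $value) = @$svc;
    # the caller is assumed to have uppercased $value already
    $servicehash{$service} = (($value eq '1') || ($value eq 'YES')) ? '1' : '0';
}
print "$_=$servicehash{$_}\n" for sort keys %servicehash;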
+1 -4
View File
@@ -207,7 +207,6 @@ my %usage = (
mkvm noderange [--full]
mkvm noderange [vmcpus=min/req/max] [vmmemory=min/req/max]
[vmphyslots=drc_index1,drc_index2...] [vmothersetting=hugepage:N,bsr:N]
[vmnics=vlan1,vlan2] [vmstorage=<N|viosnode:slotid>] [--vios]
For KVM
mkvm noderange -m|--master mastername -s|--size disksize -f|--force
For zVM
@@ -242,8 +241,6 @@ my %usage = (
chvm <noderange> [lparname=<*|name>]
chvm <noderange> [vmcpus=min/req/max] [vmmemory=min/req/max]
[vmphyslots=drc_index1,drc_index2...] [vmothersetting=hugepage:N,bsr:N]
[vmnics=vlan1,vlan2] [vmstorage=<N|viosnode:slotid>] [--vios]
chvm <noderange> [del_vadapter=slotid]
VMware specific:
chvm <noderange> [-a size][-d disk][-p disk][--resize disk=size][--cpus count][--mem memory]
zVM specific:
@@ -280,7 +277,7 @@ my %usage = (
"lsslp" =>
"Usage: lsslp [-h|--help|-v|--version]
lsslp [<noderange>][-V|--verbose][-i ip[,ip..]][-w][-r|-x|-z][-n][-I][-s FRAME|CEC|MM|IVM|RSA|HMC|CMM|IMM2|FSP]
[-u] [--range IPranges][-t tries][--vpdtable][-C counts][-T timeout]",
[-t tries][--vpdtable][-C counts][-T timeout]",
"rflash" =>
"Usage:
rflash [ -h|--help|-v|--version]
+12 -54
View File
@@ -940,15 +940,18 @@ sub runcmd
my ($class, $cmd, $exitcode, $refoutput, $stream) = @_;
$::RUNCMD_RC = 0;
# redirect stderr to stdout
if (!($cmd =~ /2>&1$/)) { $cmd .= ' 2>&1'; }
my $hostname = `/bin/hostname`;
chomp $hostname;
if ($::VERBOSE)
{
my $msg="Running command on $hostname: $cmd";
if ($::VERBOSE)
{
# get this system's name as known by the xCAT management node
my $Sname = xCAT::InstUtils->myxCATname();
my $msg;
if ($Sname) {
$msg = "Running command on $Sname: $cmd";
} else {
$msg="Running command: $cmd";
}
if ($::CALLBACK){
my $rsp = {};
@@ -957,7 +960,7 @@ sub runcmd
} else {
xCAT::MsgUtils->message("I", "$msg\n");
}
}
}
my $outref = [];
if (!defined($stream) || (length($stream) == 0)) { # do not stream
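A hedged usage sketch for the wrapper patched above (exit status is published in $::RUNCMD_RC; treating the third argument as warn-vs-die follows the existing xCAT convention, so verify against your tree):

use strict;
use warnings;
use xCAT::Utils;
$::VERBOSE = 1;    # exercises the 'Running command on ...' trace shown above
my @out = xCAT::Utils->runcmd('/bin/hostname', 0);   # 0: warn, don't die, on nonzero exit
if ($::RUNCMD_RC != 0) {
    warn "hostname failed: @out\n";
} else {
    print "ran on: $out[0]\n";
}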
@@ -3369,51 +3372,6 @@ sub filter_nostatusupdate{
}
}
}
sub version_cmp {
my $ver_a = shift;
if ($ver_a =~ /xCAT::Utils/)
{
$ver_a = shift;
}
my $ver_b = shift;
my @array_a = ($ver_a =~ /([-.]|\d+|[^-.\d]+)/g);
my @array_b = ($ver_b =~ /([-.]|\d+|[^-.\d]+)/g);
my ($a, $b);
my $len_a = @array_a;
my $len_b = @array_b;
my $len = $len_a;
if ( $len_b < $len_a ) {
$len = $len_b;
}
for ( my $i = 0; $i < $len; $i++ ) {
$a = $array_a[$i];
$b = $array_b[$i];
if ($a eq $b) {
next;
} elsif ( $a eq '-' ) {
return -1;
} elsif ( $b eq '-') {
return 1;
} elsif ( $a eq '.' ) {
return -1;
} elsif ( $b eq '.' ) {
return 1;
} elsif ($a =~ /^\d+$/ and $b =~ /^\d+$/) {
if ($a =~ /^0/ || $b =~ /^0/) {
return ($a cmp $b);
} else {
return ($a <=> $b);
}
} else {
$a = uc $a;
$b = uc $b;
return ($a cmp $b);
}
}
return ( $len_a <=> $len_b )
}
}
1;
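A small usage sketch for the version_cmp removed above: it compares dotted version strings segment by segment, numerically when both segments are numeric, lexically otherwise, and falls back to comparing lengths when one string is a prefix of the other:

use strict;
use warnings;
use xCAT::Utils;
# returns <0, 0, or >0, like cmp/<=>
printf "%d\n", xCAT::Utils::version_cmp('2.8.4', '2.8.3');   # positive: 4 <=> 3
printf "%d\n", xCAT::Utils::version_cmp('2.8',   '2.8.3');   # negative: equal so far, but shorter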
-329
View File
@@ -1,329 +0,0 @@
#!/usr/bin/env perl
# IBM(c) 2007 EPL license http://www.eclipse.org/legal/epl-v10.html
package xCAT::Zone;
BEGIN
{
$::XCATROOT = $ENV{'XCATROOT'} ? $ENV{'XCATROOT'} : '/opt/xcat';
}
# if AIX - make sure we include perl 5.8.2 in INC path.
# Needed to find perl dependencies shipped in deps tarball.
if ($^O =~ /^aix/i) {
unshift(@INC, qw(/usr/opt/perl5/lib/5.8.2/aix-thread-multi /usr/opt/perl5/lib/5.8.2 /usr/opt/perl5/lib/site_perl/5.8.2/aix-thread-multi /usr/opt/perl5/lib/site_perl/5.8.2));
}
use lib "$::XCATROOT/lib/perl";
# do not put a use or require for xCAT::Table here. Add to each new routine
# needing it to avoid reprocessing of user tables (ExtTab.pm) for each command call
use POSIX qw(ceil);
use File::Path;
use Socket;
use strict;
use Symbol;
use warnings "all";
#--------------------------------------------------------------------------------
=head1 xCAT::Zone
=head2 Package Description
This program module file is a set of Zone utilities used by the xCAT *zone commands.
=cut
#--------------------------------------------------------------------------------
=head3 genSSHRootKeys
Arguments:
callback for error messages
directory in which to put the ssh RSA keys
zonename
rsa private key to use for generation (optional)
Returns:
Error: 1 - key generation failure.
Example:
$rc = xCAT::Zone->genSSHRootKeys($callback,$keydir,$zonename,$rsakey);
=cut
#--------------------------------------------------------------------------------
sub genSSHRootKeys
{
my ($class, $callback, $keydir,$zonename,$rsakey) = @_;
#
# create /keydir if needed
#
if (!-d $keydir)
{
my $cmd = "/bin/mkdir -m 700 -p $keydir";
my $output = xCAT::Utils->runcmd("$cmd", 0);
if ($::RUNCMD_RC != 0)
{
my $rsp = {};
$rsp->{error}->[0] =
"Could not create $keydir directory";
xCAT::MsgUtils->message("E", $rsp, $callback);
return 1;
}
}
#
# create /install/postscripts/_ssh/zonename if needed
#
my $installdir = xCAT::TableUtils->getInstallDir(); # get installdir
if (!-d "$installdir/postscripts/_ssh/$zonename")
{
my $cmd = "/bin/mkdir -m 755 -p $installdir/postscripts/_ssh/$zonename";
my $output = xCAT::Utils->runcmd("$cmd", 0);
if ($::RUNCMD_RC != 0)
{
my $rsp = {};
$rsp->{error}->[0] = "Could not create $installdir/postscripts/_ssh/$zonename directory.";
xCAT::MsgUtils->message("E", $rsp, $callback);
return 1;
}
}
#need to gen a new rsa key for root for the zone
my $pubfile = "$keydir/id_rsa.pub";
my $pvtfile = "$keydir/id_rsa";
# if exists, remove the old files
if (-r $pubfile)
{
my $cmd = "/bin/rm $keydir/id_rsa*";
my $output = xCAT::Utils->runcmd("$cmd", 0);
if ($::RUNCMD_RC != 0)
{
my $rsp = {};
$rsp->{error}->[0] = "Could not remove id_rsa files from $keydir directory.";
xCAT::MsgUtils->message("E", $rsp, $callback);
return 1;
}
}
# gen new RSA keys
my $cmd;
my $output;
# if private key was input use it
if (defined ($rsakey)) {
$cmd="/usr/bin/ssh-keygen -y -f $rsakey > $pubfile";
$output = xCAT::Utils->runcmd("$cmd", 0);
if ($::RUNCMD_RC != 0)
{
my $rsp = {};
$rsp->{error}->[0] = "Could not generate $pubfile from $rsakey";
xCAT::MsgUtils->message("E", $rsp, $callback);
return 1;
}
# now copy the private key into the directory
$cmd="cp $rsakey $keydir";
$output = xCAT::Utils->runcmd("$cmd", 0);
if ($::RUNCMD_RC != 0)
{
my $rsp = {};
$rsp->{error}->[0] = "Could not run $cmd";
xCAT::MsgUtils->message("E", $rsp, $callback);
return 1;
}
} else { # generate all new keys
$cmd = "/usr/bin/ssh-keygen -t rsa -q -b 2048 -N '' -f $pvtfile";
$output = xCAT::Utils->runcmd("$cmd", 0);
if ($::RUNCMD_RC != 0)
{
my $rsp = {};
$rsp->{error}->[0] = "Could not generate $pubfile";
xCAT::MsgUtils->message("E", $rsp, $callback);
return 1;
}
}
#make sure permissions are correct
$cmd = "chmod 644 $pubfile;chown root $pubfile";
$output = xCAT::Utils->runcmd("$cmd", 0);
if ($::RUNCMD_RC != 0)
{
my $rsp = {};
$rsp->{error}->[0] = "Could set permission and owner on $pubfile";
xCAT::MsgUtils->message("E", $rsp, $callback);
return 1;
}
# copy authorized_keys for install on node
if (-r $pubfile)
{
my $cmd =
"/bin/cp -p $pubfile $installdir/postscripts/_ssh/$zonename ";
my $output = xCAT::Utils->runcmd("$cmd", 0);
if ($::RUNCMD_RC != 0)
{
my $rsp = {};
$rsp->{error}->[0] =
"Could not copy $pubfile to $installdir/postscripts/_ssh/$zonename";
xCAT::MsgUtils->message("E", $rsp, $callback);
return 1;
}
}
else
{
my $rsp = {};
$rsp->{error}->[0] =
"Could not copy $pubfile to $installdir/postscripts/_ssh/$zonename, because $pubfile does not exist.";
xCAT::MsgUtils->message("E", $rsp, $callback);
}
}
#--------------------------------------------------------------------------------
=head3 getdefaultzone
Arguments:
None
Returns:
Name of the current default zone from the zone table
Example:
my $defaultzone =xCAT::Zone->getdefaultzone();
=cut
#--------------------------------------------------------------------------------
sub getdefaultzone
{
my ($class, $callback) = @_;
my $defaultzone;
# read all the zone table and find the defaultzone, if it exists
my $tab = xCAT::Table->new("zone");
if ($tab){
my @zones = $tab->getAllAttribs('zonename','defaultzone');
foreach my $zone (@zones) {
# Look for the defaultzone=yes/1 entry
if ((defined($zone->{defaultzone})) &&
(($zone->{defaultzone} =~ "yes") || ($zone->{defaultzone} eq "1"))) {
$defaultzone = $zone->{zonename};
}
}
$tab->close();
} else {
my $rsp = {};
$rsp->{error}->[0] =
"Error reading the zone table. ";
xCAT::MsgUtils->message("E", $rsp, $callback);
}
return $defaultzone;
}
#--------------------------------------------------------------------------------
=head3 iszonedefined
Arguments:
zonename
Returns:
1 if the zone is already in the zone table.
Example:
xCAT::Zone->iszonedefined($zonename);
=cut
#--------------------------------------------------------------------------------
sub iszonedefined
{
my ($class,$zonename) = @_;
# checks the zone table to see if input zonename already in the table
my $tab = xCAT::Table->new("zone");
my $zone = $tab->getAttribs({zonename => $zonename},'sshkeydir');
$tab->close();
if (defined($zone)) {
return 1;
}else{
return 0;
}
}
#--------------------------------------------------------------------------------
=head3 getzoneinfo
Arguments:
An array of nodes
Returns:
Hash keyed by zonename, pointing to the nodes in that zone and its sshkeydir
zonename1 -> {nodelist} -> array of nodes in the zone
-> {sshkeydir} -> directory containing ssh RSA keys
-> {defaultzone} -> is it the default zone
Example:
my $zonehash = xCAT::Zone->getzoneinfo($callback,\@nodes);
Rules:
If the node's nodelist.zonename attribute is a zonename, it is assigned to that zone
If the node's nodelist.zonename attribute is undefined:
If there is a defaultzone in the zone table, the node is assigned to that zone
If there is no defaultzone in the zone table, the node is assigned to the ~/.ssh keydir
=cut
#--------------------------------------------------------------------------------
sub getzoneinfo
{
my ($class, $callback,$nodes) = @_;
# make the list into an array
# $nodelist=~ s/\s*//g; # remove blanks
# my @nodes = split ',', $nodelist;
my $zonehash;
my $defaultzone;
# read all the zone table
my $zonetab = xCAT::Table->new("zone");
if ($zonetab){
my @zones = $zonetab->getAllAttribs('zonename','sshkeydir','defaultzone');
$zonetab->close();
if (@zones) {
foreach my $zone (@zones) {
my $zonename=$zone->{zonename};
$zonehash->{$zonename}->{sshkeydir}= $zone->{sshkeydir};
$zonehash->{$zonename}->{defaultzone}= $zone->{defaultzone};
# find the defaultzone
if ((defined($zone->{defaultzone})) &&
(($zone->{defaultzone} =~ "yes") || ($zone->{defaultzone} eq "1"))) {
$defaultzone = $zone->{zonename};
}
}
}
} else {
my $rsp = {};
$rsp->{error}->[0] =
"Error reading the zone table. ";
xCAT::MsgUtils->message("E", $rsp, $callback);
return;
}
my $nodelisttab = xCAT::Table->new("nodelist");
my $nodehash = $nodelisttab->getNodesAttribs(\@$nodes, ['zonename']);
# for each of the nodes, look up it's zone name and assign to the zonehash
# if the node is a service node, it is assigned to the __xcatzone which gets its keys from
# the ~/.ssh dir no matter what in the database for the zonename.
# If the node's nodelist.zonename attribute is a zonename, it is assigned to that zone
# If the node's nodelist.zonename attribute is undefined:
# If there is a defaultzone in the zone table, the node is assigned to that zone
# If there is no defaultzone in the zone table, the node is assigned to the ~/.ssh keydir
my @allSN=xCAT::ServiceNodeUtils->getAllSN("ALL"); # read all the service nodes defined
my $xcatzone = "__xcatzone"; # if node is in no zones or a service node, use this one
$zonehash->{$xcatzone}->{sshkeydir}= "~/.ssh";
foreach my $node (@$nodes) {
my $zonename;
if (grep(/^$node$/, @allSN)) { # this is a servicenode, treat special
$zonename=$xcatzone; # always use ~/.ssh directory
} else { # use the nodelist.zonename attribute
$zonename=$nodehash->{$node}->[0]->{zonename};
}
if (defined($zonename)) { # zonename explicitly defined in nodelist.zonename
push @{$zonehash->{$zonename}->{nodes}},$node;
} else { # no explicit zonename
if (defined ($defaultzone)) { # there is a default zone in the zone table, use it
push @{$zonehash->{$defaultzone}->{nodes}},$node;
} else { # if no default then use the ~/.ssh keys as the default, put them in the __xcatzone
push @{$zonehash->{$xcatzone}->{nodes}},$node;
}
}
}
return $zonehash;
}
1;
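A hedged sketch of how a *zone command might drive the removed helpers above; the callback stub and the key-directory layout are assumptions for illustration, not taken from this diff:

use strict;
use warnings;
use xCAT::Zone;
my $callback = sub { };                              # stand-in message callback (assumption)
my $zonename = 'zoneA';
my $keydir   = "/etc/xcat/sshkeys/$zonename/.ssh";   # assumed key directory layout
unless (xCAT::Zone->iszonedefined($zonename)) {
    # generate a fresh RSA keypair for the zone and stage the public key
    # under <installdir>/postscripts/_ssh/<zonename>; returns 1 on failure
    xCAT::Zone->genSSHRootKeys($callback, $keydir, $zonename)
        and warn "key generation for $zonename failed\n";
}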
+1 -5
View File
@@ -49,8 +49,6 @@ require Exporter;
"1328205744.315196" => "rhels5.8", #x86_64
"1354216429.587870" => "rhels5.9", #x86_64
"1354214009.518521" => "rhels5.9", #ppc64
"1378846702.129847" => "rhels5.10", #x86_64
"1378845049.643372" => "rhels5.10", #ppc64
"1285193176.460470" => "rhels6", #x86_64
"1285192093.430930" => "rhels6", #ppc64
"1305068199.328169" => "rhels6.1", #x86_64
@@ -62,8 +60,6 @@ require Exporter;
"1339638991.532890" => "rhels6.3", #i386
"1359576752.435900" => "rhels6.4", #x86_64
"1359576196.686790" => "rhels6.4", #ppc64
"1384196515.415715" => "rhels6.5", #x86_64
"1384198011.520581" => "rhels6.5", #ppc64
"1285193176.593806" => "rhelhpc6", #x86_64
"1305067719.718814" => "rhelhpc6.1",#x86_64
"1321545261.599847" => "rhelhpc6.2",#x86_64
@@ -82,7 +78,7 @@ require Exporter;
"1305315870.828212" => "fedora15", #x86_64 DVD ISO
"1372355769.065812" => "fedora19", #x86_64 DVD ISO
"1372402928.663653" => "fedora19", #ppc64 DVD ISO
"1386856788.124593" => "fedora20", #x86_64 DVD ISO
"1194512200.047708" => "rhas4.6",
"1194512327.501046" => "rhas4.6",
"1241464993.830723" => "rhas4.8", #x86-64
+78 -269
View File
@@ -6,275 +6,84 @@
#include <stdlib.h>
#include <errno.h>
#include <netinet/in.h>
#include <signal.h>
#include <syslog.h>
// the chunk size for each alloc
int chunknum = 200;
int doreload = 0;
int verbose = 0;
char logmsg[50];
// the struct to store the winpe configuration for each node
struct nodecfg {
char node[50];
char data[150];
};
char *data = NULL; // the ptr to the array of all node config
int nodenum = 0;
// trigger the main program to reload configuration file
void reload(int sig) {
doreload = 1;
}
// the subroutine which is used to load configuration from
// /var/lib/xcat/proxydhcp.cfg to *data
void loadcfg () {
nodenum = 0;
free(data);
data = NULL;
doreload = 0;
char *dp = NULL;
FILE *fp;
fp = fopen("/var/lib/xcat/proxydhcp.cfg", "r");
if (fp) {
int num = chunknum;
int rtime = 1;
while (num == chunknum) {
// grow the buffer one chunk at a time to limit memory usage
data = realloc(data, sizeof(struct nodecfg) * chunknum * rtime);
if (NULL == data) {
fprintf (stderr, "Cannot get enough memory.\n");
free (data);
return;
}
dp = data + sizeof(struct nodecfg) * chunknum * (rtime - 1);
memset(dp, 0, sizeof(struct nodecfg) * chunknum);
num = fread(dp, sizeof (struct nodecfg), chunknum, fp);
nodenum += num;
rtime++;
}
fclose(fp);
}
}
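The loader removed above reads fixed-width records matching struct nodecfg (a 50-byte node name followed by a 150-byte data path). A hedged Perl sketch of a compatible writer — the field widths come from the struct, but the writer itself is illustrative, not the xCAT one:

use strict;
use warnings;
# 'a50 a150' NUL-pads each field to match struct nodecfg's 200-byte layout
open my $fh, '>', '/var/lib/xcat/proxydhcp.cfg' or die "open: $!";
binmode $fh;
print $fh pack('a50 a150', 'node1', 'winboot/win2012r2/');
print $fh pack('a50 a150', 'node2', 'winboot/win8/');
close $fh;
# sending SIGUSR1 to the pid in /var/run/xcat/proxydhcp.pid triggers a reload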
// get the path of winpe from configuration file which is stored in *data
char *getwinpepath(char *node) {
int i;
struct nodecfg *nc = (struct nodecfg *)data;
for (i=0; i<nodenum;i++) {
if (0 == strcmp(nc->node, node)) {
return nc->data;
}
nc++;
}
return NULL;
}
int main(int argc, char *argv[]) {
int i;
for(i = 0; i < argc; i++)
{
if (strcmp(argv[i], "-V") == 0) {
verbose = 1;
setlogmask(LOG_UPTO(LOG_DEBUG));
openlog("proxydhcp", LOG_NDELAY, LOG_LOCAL0);
}
}
// register my pid in /var/run/xcat/proxydhcp.pid
int pid = getpid();
FILE *pidf = fopen ("/var/run/xcat/proxydhcp.pid", "w");
if (pidf) {
fprintf(pidf, "%d", pid);
fclose (pidf);
} else {
fprintf (stderr, "Cannot open /var/run/xcat/proxydhcp.pid\n");
return 1;
}
// load configuration at first start
loadcfg();
// register a SIGUSR1 handler so a configuration reload can be triggered from outside
struct sigaction sigact;
sigact.sa_handler = &reload;
sigaction(SIGUSR1, &sigact, NULL);
int serverfd,port;
int getpktinfo = 1;
struct addrinfo hint, *res;
char cmsg[CMSG_SPACE(sizeof(struct in_pktinfo))];
char clientpacket[1024];
struct sockaddr_in clientaddr;
struct msghdr msg;
struct cmsghdr *cmsgptr;
struct iovec iov[1];
unsigned int myip, clientip;
char *txtptr;
iov[0].iov_base = clientpacket;
iov[0].iov_len = 1024;
memset(&msg,0,sizeof(msg));
memset(&clientaddr,0,sizeof(clientaddr));
msg.msg_name=&clientaddr;
msg.msg_namelen = sizeof(clientaddr);
msg.msg_iov = iov;
msg.msg_iovlen = 1;
msg.msg_control=&cmsg;
msg.msg_controllen = sizeof(cmsg);
char defaultwinpe[20] = "Boot/bootmgfw.efi";
char bootpmagic[4] = {0x63,0x82,0x53,0x63};
int pktsize;
int doexit=0;
port = 4011;
memset(&hint,0,sizeof(hint));
hint.ai_family = PF_INET; /* Would've done UNSPEC, but it doesn't work right and this is heavily v4 specific anyway */
hint.ai_socktype = SOCK_DGRAM;
hint.ai_flags = AI_PASSIVE;
getaddrinfo(NULL,"4011",&hint,&res);
serverfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
if (!serverfd) { fprintf(stderr,"That's odd...\n"); }
setsockopt(serverfd,IPPROTO_IP,IP_PKTINFO,&getpktinfo,sizeof(getpktinfo));
if (bind(serverfd,res->ai_addr ,res->ai_addrlen) < 0) {
fprintf(stderr,"Unable to bind 4011");
exit(1);
}
while (!doexit) {
// use select to wait for the 4011 request packages coming
fd_set fds;
FD_ZERO(&fds);
FD_SET(serverfd, &fds);
struct timeval timeout;
timeout.tv_sec = 30;
timeout.tv_usec = 0;
int rc;
if ((rc = select(serverfd+1,&fds,0,0, &timeout)) <= 0) {
if (doreload) {
loadcfg();
fprintf(stderr, "load in select\n");
}
if (verbose) {syslog(LOG_DEBUG, "reload /var/lib/xcat/proxydhcp.cfg\n");}
continue;
}
if (doreload) {
loadcfg();
if (verbose) {syslog(LOG_DEBUG, "reload /var/lib/xcat/proxydhcp.cfg\n");}
}
pktsize = recvmsg(serverfd,&msg,0);
if (pktsize < 320) {
continue;
}
if (clientpacket[0] != 1 || memcmp(clientpacket+0xec,bootpmagic,4)) {
continue;
}
for (cmsgptr = CMSG_FIRSTHDR(&msg); cmsgptr != NULL; cmsgptr = CMSG_NXTHDR(&msg,cmsgptr)) {
if (cmsgptr->cmsg_level == IPPROTO_IP && cmsgptr->cmsg_type == IP_PKTINFO) {
myip = ((struct in_pktinfo*)(CMSG_DATA(cmsgptr)))->ipi_addr.s_addr;
}
}
// get the ip of dhcp client
clientip = 0;
int i;
for (i = 0; i< 4; i++) {
clientip = clientip << 8;
clientip += (unsigned char)clientpacket[15-i];
}
// get the winpe path
struct hostent *host = gethostbyaddr(&clientip, sizeof(clientip), AF_INET);
char *winpepath = defaultwinpe;
if (host) {
if (host->h_name) {
// remove the domain part from hostname
char *place = strstr(host->h_name, ".");
if (place) {
*place = '\0';
}
winpepath = getwinpepath(host->h_name);
if (winpepath == NULL) {
winpepath = defaultwinpe;
}
if (verbose) {
sprintf(logmsg, "Received proxydhcp request from %s\n", host->h_name);
syslog(LOG_DEBUG, logmsg);
}
}
} else {
winpepath = defaultwinpe;
}
// get the Vendor class identifier
char *arch = NULL;
unsigned char *p = clientpacket + 0xf0;
while (*p != 0xff && p < (unsigned char *)clientpacket + pktsize) {
if (*p == 60) {
arch = p + 0x11;
break;
} else {
p += *(p+1) + 2;
}
}
char winboot[50]; // the bootloader path for winpe
memset(winboot, 0, 50);
if (0 == memcmp(arch, "00000", 5)) { // bios boot mode
strcpy(winboot, winpepath);
strcat(winboot, "Boot/pxeboot.0");
} else if (0 == memcmp(arch, "00007", 5)) { // uefi boot mode
strcpy(winboot, winpepath);
strcat(winboot, "Boot/bootmgfw.efi");
}
clientpacket[0] = 2; //change to a reply
myip = htonl(myip); //endian neutral change
clientpacket[0x14] = (myip>>24)&0xff; //maybe don't need to do this, maybe assigning the whole int would be better
clientpacket[0x15] = (myip>>16)&0xff;
clientpacket[0x16] = (myip>>8)&0xff;
clientpacket[0x17] = (myip)&0xff;
txtptr = clientpacket+0x6c;
strncpy(txtptr, winboot ,128); // keeping 128 in there just in case someone changes the string
//strncpy(txtptr,"winboot/new/Boot/bootmgfw.efi",128); // keeping 128 in there just in case someone changes the string
//strncpy(txtptr,"Boot/pxeboot.0",128); // keeping 128 in there just in case someone changes the string
clientpacket[0xf0]=0x35; //DHCP MSG type
clientpacket[0xf1]=0x1; // LEN of 1
clientpacket[0xf2]=0x5; //DHCP ACK
clientpacket[0xf3]=0x36; //DHCP server identifier
clientpacket[0xf4]=0x4; //DHCP server identifier length
clientpacket[0xf5] = (myip>>24)&0xff; //maybe don't need to do this, maybe assigning the whole int would be better
clientpacket[0xf6] = (myip>>16)&0xff;
clientpacket[0xf7] = (myip>>8)&0xff;
clientpacket[0xf8] = (myip)&0xff;
char winBCD[50];
strcpy(winBCD, winpepath);
strcat(winBCD, "Boot/BCD");
clientpacket[0xf9] = 0xfc; // dhcp 252 'proxy', but co-opted by bootmgfw; it's actually suggesting the boot config file
clientpacket[0xfa] = strlen(winBCD) + 1; //length of 9
txtptr = clientpacket+0xfb;
strncpy(txtptr, winBCD, strlen(winBCD));
clientpacket[0xfa + strlen(winBCD) + 1] = 0;
clientpacket[0xfa + strlen(winBCD) + 2] = 0xff;
sendto(serverfd,clientpacket,pktsize,0,(struct sockaddr*)&clientaddr,sizeof(clientaddr));
if (verbose) {
sprintf(logmsg, "Path of bootloader:%s. Path of BCD:%s\n", winboot, winBCD);
syslog(LOG_DEBUG, logmsg);
}
}
if (verbose) { closelog();}
int main() {
int serverfd,port;
int getpktinfo = 1;
struct addrinfo hint, *res;
char cmsg[CMSG_SPACE(sizeof(struct in_pktinfo))];
char clientpacket[1024];
struct sockaddr_in clientaddr;
struct msghdr msg;
struct cmsghdr *cmsgptr;
struct iovec iov[1];
unsigned int myip;
char *txtptr;
iov[0].iov_base = clientpacket;
iov[0].iov_len = 1024;
memset(&msg,0,sizeof(msg));
memset(&clientaddr,0,sizeof(clientaddr));
msg.msg_name=&clientaddr;
msg.msg_namelen = sizeof(clientaddr);
msg.msg_iov = iov;
msg.msg_iovlen = 1;
msg.msg_control=&cmsg;
msg.msg_controllen = sizeof(cmsg);
char bootpmagic[4] = {0x63,0x82,0x53,0x63};
int pktsize;
int doexit=0;
port = 4011;
memset(&hint,0,sizeof(hint));
hint.ai_family = PF_INET; /* Would've done UNSPEC, but it doesn't work right and this is heavily v4 specific anyway */
hint.ai_socktype = SOCK_DGRAM;
hint.ai_flags = AI_PASSIVE;
getaddrinfo(NULL,"4011",&hint,&res);
serverfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
if (!serverfd) { fprintf(stderr,"That's odd...\n"); }
setsockopt(serverfd,IPPROTO_IP,IP_PKTINFO,&getpktinfo,sizeof(getpktinfo));
if (bind(serverfd,res->ai_addr ,res->ai_addrlen) < 0) {
fprintf(stderr,"Unable to bind 4011");
exit(1);
}
while (!doexit) {
pktsize = recvmsg(serverfd,&msg,0);
if (pktsize < 320) {
continue;
}
if (clientpacket[0] != 1 || memcmp(clientpacket+0xec,bootpmagic,4)) {
continue;
}
for (cmsgptr = CMSG_FIRSTHDR(&msg); cmsgptr != NULL; cmsgptr = CMSG_NXTHDR(&msg,cmsgptr)) {
if (cmsgptr->cmsg_level == IPPROTO_IP && cmsgptr->cmsg_type == IP_PKTINFO) {
myip = ((struct in_pktinfo*)(CMSG_DATA(cmsgptr)))->ipi_addr.s_addr;
}
}
clientpacket[0] = 2; //change to a reply
myip = htonl(myip); //endian neutral change
clientpacket[0x14] = (myip>>24)&0xff; //maybe don't need to do this, maybe assigning the whole int would be better
clientpacket[0x15] = (myip>>16)&0xff;
clientpacket[0x16] = (myip>>8)&0xff;
clientpacket[0x17] = (myip)&0xff;
txtptr = clientpacket+0x6c;
strncpy(txtptr,"Boot/bootmgfw.efi",128); // keeping 128 in there just in case someone changes the string
clientpacket[0xf0]=0x35; //DHCP MSG type
clientpacket[0xf1]=0x1; // LEN of 1
clientpacket[0xf2]=0x5; //DHCP ACK
clientpacket[0xf3]=0x36; //DHCP server identifier
clientpacket[0xf4]=0x4; //DHCP server identifier length
clientpacket[0xf5] = (myip>>24)&0xff; //maybe don't need to do this, maybe assigning the whole int would be better
clientpacket[0xf6] = (myip>>16)&0xff;
clientpacket[0xf7] = (myip>>8)&0xff;
clientpacket[0xf8] = (myip)&0xff;
clientpacket[0xf9] = 0xfc; // dhcp 252 'proxy', but co-opted by bootmgfw; it's actually suggesting the boot config file
clientpacket[0xfa] = 9; //length of 9
txtptr = clientpacket+0xfb;
strncpy(txtptr,"Boot/BCD",8);
clientpacket[0x103]=0;
clientpacket[0x104]=0xff;
sendto(serverfd,clientpacket,pktsize,0,(struct sockaddr*)&clientaddr,sizeof(clientaddr));
}
}
@@ -14,7 +14,7 @@
# postscript (stateful install) or with the otherpkgs processing of
# genimage (stateless/statelite install). This script will install any
# gpfs update rpms that exist on the xCAT management node in the
# /install/post/otherpkgs/gpfs_updates directory.
# /install/post/gpfs_updates directory.
# This is necessary because the GPFS updates can ONLY be installed
# after the base rpms have been installed, and the update rpms cannot
# exist in any rpm repositories used by xCAT otherpkgs processing
@@ -91,9 +91,7 @@ template "/etc/quantum/policy.json" do
group node["openstack"]["network"]["platform"]["group"]
mode 00644
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-network::server")
notifies :restart, "service[quantum-server]", :delayed
end
notifies :restart, "service[quantum-server]", :delayed
end
rabbit_server_role = node["openstack"]["network"]["rabbit_server_chef_role"]
@@ -145,14 +143,12 @@ end
# may just be running a subset of agents (like l3_agent)
# and not the api server components, so we ignore restart
# failures here as there may be no quantum-server process
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-network::server")
service "quantum-server" do
service_name platform_options["quantum_server_service"]
supports :status => true, :restart => true
ignore_failure true
service "quantum-server" do
service_name platform_options["quantum_server_service"]
supports :status => true, :restart => true
ignore_failure true
action :nothing
end
action :nothing
end
template "/etc/quantum/quantum.conf" do
@@ -170,9 +166,7 @@ template "/etc/quantum/quantum.conf" do
:service_pass => service_pass
)
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-network::server")
notifies :restart, "service[quantum-server]", :delayed
end
notifies :restart, "service[quantum-server]", :delayed
end
template "/etc/quantum/api-paste.ini" do
@@ -185,9 +179,7 @@ template "/etc/quantum/api-paste.ini" do
"service_pass" => service_pass
)
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-network::server")
notifies :restart, "service[quantum-server]", :delayed
end
notifies :restart, "service[quantum-server]", :delayed
end
directory "/etc/quantum/plugins/#{main_plugin}" do
@@ -344,9 +336,7 @@ when "openvswitch"
:sql_connection => sql_connection,
:local_ip => local_ip
)
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-network::server")
notifies :restart, "service[quantum-server]", :delayed
end
notifies :restart, "service[quantum-server]", :delayed
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-network::openvswitch")
notifies :restart, "service[quantum-plugin-openvswitch-agent]", :delayed
end
@@ -9,31 +9,31 @@
"ref": "f759cd013c0a836f2acb219b3e006ff0a1308878"
},
"memcached": {
"locked_version": "1.6.2"
"locked_version": "1.4.0"
},
"runit": {
"locked_version": "1.3.0"
"locked_version": "1.1.6"
},
"build-essential": {
"locked_version": "1.4.2"
"locked_version": "1.4.0"
},
"yum": {
"locked_version": "2.4.0"
"locked_version": "2.3.0"
},
"sysctl": {
"locked_version": "0.3.3"
},
"apt": {
"locked_version": "2.3.0"
"locked_version": "2.1.0"
},
"git": {
"locked_version": "2.7.0"
"locked_version": "2.5.2"
},
"dmg": {
"locked_version": "2.0.4"
"locked_version": "1.1.0"
},
"windows": {
"locked_version": "1.11.0"
"locked_version": "1.10.0"
},
"chef_handler": {
"locked_version": "1.1.4"
@@ -1,46 +0,0 @@
# CHANGELOG for cookbook-openstack-object-storage
This file is used to list changes made in each version of cookbook-openstack-object-storage.
## 7.1.0:
* Update apt sources to grizzly to prepare for grizzly
and havana branches
## 7.0.11:
* Add missing swift-container-sync upstart service which is
not setup by default in ubuntu 12.04 packages
## 7.0.10:
* Do not role restrict super_admin_key in proxy config
* Case correct swauth_version attribute in proxy recipe
* Treat platform_options["swauth_packages"] as a list
## 7.0.9:
* Bugfix tempurl role restriction
## 7.0.8:
* Bugfix allow_override spacing in proxy server template
## 7.0.7:
* Add flexibility to middleware pipeline
## 7.0.6:
* Add choice of install python-swauth from git or package
## 7.0.5:
* Add support for container-sync
## 7.0.4:
* Allow roles used in searches to be defined by cookbook user
## 7.0.3:
* Bugfix the swift-ring-builder output scanner
## 7.0.2:
* Expand statsd support as well as capacity and recon supporting.
## 7.0.1:
* Support more then 24 disks (/dev/sdaa, /dev/vdab, etc)
## 7.0.0:
* Initial openstack object storage cookbook
@@ -63,14 +63,6 @@ Attributes
* ```default[:swift][:authmode]``` - "swauth" or "keystone" (default "swauth"). Right now, only swauth is supported (defaults to swauth)
* ```default[:swift][:tempurl]``` - "true" or "false". Adds tempurl to the pipeline and sets allow_overrides to true when using swauth
* ```default[:swift][:swauth_source]``` - "git" or "package"(default). Selects between installing python-swauth from git or system package
* ```default[:swift][:swauth_repository]``` - Specifies git repo. Default "https://github.com/gholt/swauth.git"
* ```default[:swift][:swauth_version]``` - Specifies git repo tagged branch. Default "1.0.8"
* ```default[:swift][:swift_secret_databag_name]``` - this cookbook supports an optional secret databag from which we will retrieve the following attributes, overriding any default attributes below (defaults to nil)
```
@@ -257,7 +249,7 @@ License and Author
| | |
|:---------------------|:---------------------------------------------------|
| **Authors** | Alan Meadows (<alan.meadows@gmail.com>) |
| | Oisin Feeley (<of3434@att.com>) |
| | Oisin Feely (<of3434@att.com>) |
| | Ron Pedde (<ron.pedde@rackspace.com>) |
| | Will Kelly (<will.kelly@rackspace.com>) |
| | |
@@ -11,7 +11,7 @@ default["swift"]["git_builder_ip"] = "127.0.0.1"
# the release only has any effect on ubuntu, and must be
# a valid release on http://ubuntu-cloud.archive.canonical.com/ubuntu
default["swift"]["release"] = "grizzly"
default["swift"]["release"] = "folsom"
# we support an optional secret databag from which we will retrieve the
# following attributes, overriding any default attributes here
@@ -25,17 +25,6 @@ default["swift"]["release"] = "grizzly"
# }
default["swift"]["swift_secret_databag_name"] = nil
#--------------------
# roles
#--------------------
default["swift"]["setup_chef_role"] = "swift-setup"
default["swift"]["management_server_chef_role"] = "swift-management-server"
default["swift"]["proxy_server_chef_role"] = "swift-proxy-server"
default["swift"]["object_server_chef_role"] = "swift-object-server"
default["swift"]["account_server_chef_role"] = "swift-account-server"
default["swift"]["container_server_chef_role"] = "swift-container-server"
#--------------------
# authentication
#--------------------
@@ -64,40 +53,7 @@ default["swift"]["ring"]["replicas"] = 3
#------------------
# statistics
#------------------
default["swift"]["statistics"]["enabled"] = true
default["swift"]["statistics"]["sample_rate"] = 1
# there are two ways to discover your graphite server ip for
# statsd to periodically publish to. You can directly set
# the ip below, or leave it set to nil and supply chef with
# the role name of your graphite server and the interface
# name to retrieve the appropriate internal ip address from
#
# if no servers with the role below can be found then
# 127.0.0.1 will be used
default["swift"]["statistics"]["graphing_ip"] = nil
default["swift"]["statistics"]["graphing_role"] = 'graphite-role'
default["swift"]["statistics"]["graphing_interface"] = 'eth0'
# how frequently to run chef instantiated /usr/local/bin/swift_statsd_publish.py
# which publishes dispersion and recon statistics (in minutes)
default["swift"]["statistics"]["report_frequency"] = 15
# enable or disable specific portions of generated report
default["swift"]["statistics"]["enable_dispersion_report"] = true
default["swift"]["statistics"]["enable_recon_report"] = true
default["swift"]["statistics"]["enable_disk_report"] = true
# settings for statsd which should be configured to use the local
# statsd daemon that chef will install if statistics are enabled
default["swift"]["statistics"]["statsd_host"] = "127.0.0.1"
default["swift"]["statistics"]["statsd_port"] = "8125"
default["swift"]["statistics"]["statsd_prefix"] = "openstack.swift"
# paths to the recon cache files
default["swift"]["statistics"]["recon_account_cache"] = "/var/cache/swift/account.recon"
default["swift"]["statistics"]["recon_container_cache"] = "/var/cache/swift/container.recon"
default["swift"]["statistics"]["recon_object_cache"] = "/var/cache/swift/object.recon"
default["swift"]["enable_statistics"] = true
#------------------
# network settings
@@ -153,52 +109,11 @@ default["swift"]["disk_test_filter"] = [ "candidate =~ /(sd|hd|xvd|vd)(?!a$)[a-z
"not system('/sbin/parted /dev/' + candidate + ' -s print | grep linux-swap')",
"not info.has_key?('removable') or info['removable'] == 0.to_s" ]
#-------------------
# template overrides
#-------------------
# proxy-server
# override in a wrapper to enable tempurl with swauth
default["swift"]["tempurl"]["enabled"] = false
# container-server
# Override this with an allowed list of your various swift clusters if you wish
# to enable container sync for your end-users between clusters. This should
# be an array of fqdn hostnames for the cluster end-points that your end-users
# would access in the format of ['host1', 'host2', 'host3']
default["swift"]["container-server"]["allowed_sync_hosts"] = []
# container-sync logging settings
default["swift"]["container-server"]["container-sync"]["log_name"] = 'container-sync'
default["swift"]["container-server"]["container-sync"]["log_facility"] = 'LOG_LOCAL0'
default["swift"]["container-server"]["container-sync"]["log_level"] = 'INFO'
# If you need to use an HTTP Proxy, set it here; defaults to no proxy.
default["swift"]["container-server"]["container-sync"]["sync_proxy"] = nil
# Will sync, at most, each container once per interval (in seconds)
default["swift"]["container-server"]["container-sync"]["interval"] = 300
# Maximum amount of time to spend syncing each container per pass (in seconds)
default["swift"]["container-server"]["container-sync"]["container_time"] = 60
#------------------
# swauth source
# -----------------
# Versions of swauth in Ubuntu Cloud Archive PPA can be outdated. This
# allows us to chose to install directly from a tagged branch of
# gholt's repository.
# values: package, git
default["swift"]["swauth_source"] = "package"
default["swift"]["swauth_repository"] = "https://github.com/gholt/swauth.git"
default["swift"]["swauth_version"] = "1.0.8"
#------------------
# packages
#------------------
# Leveling between distros
case platform
when "redhat"
@@ -217,8 +132,7 @@ when "redhat"
"git_dir" => "/var/lib/git",
"git_service" => "git",
"service_provider" => Chef::Provider::Service::Redhat,
"override_options" => "",
"swift_statsd_publish" => "/usr/bin/swift-statsd-publish.py"
"override_options" => ""
}
#
# python-iso8601 is a missing dependency for swift.
@@ -239,8 +153,7 @@ when "centos"
"git_dir" => "/var/lib/git",
"git_service" => "git",
"service_provider" => Chef::Provider::Service::Redhat,
"override_options" => "",
"swift_statsd_publish" => "/usr/bin/swift-statsd-publish.py"
"override_options" => ""
}
when "fedora"
default["swift"]["platform"] = {
@@ -258,8 +171,7 @@ when "fedora"
"git_dir" => "/var/lib/git",
"git_service" => "git",
"service_provider" => Chef::Provider::Service::Systemd,
"override_options" => "",
"swift_statsd_publish" => "/usr/bin/swift-statsd-publish.py"
"override_options" => ""
}
when "ubuntu"
default["swift"]["platform"] = {
@@ -277,7 +189,6 @@ when "ubuntu"
"git_dir" => "/var/cache/git",
"git_service" => "git-daemon",
"service_provider" => Chef::Provider::Service::Upstart,
"override_options" => "-o Dpkg::Options:='--force-confold' -o Dpkg::Option:='--force-confdef'",
"swift_statsd_publish" => "/usr/local/bin/swift-statsd-publish.py"
"override_options" => "-o Dpkg::Options:='--force-confold' -o Dpkg::Option:='--force-confdef'"
}
end
@@ -1,19 +0,0 @@
# swift-container-sync - SWIFT Container Sync
#
# The swift container sync.
description "SWIFT Container Sync"
author "Sergio Rubio <rubiojr@bvox.net>"
start on runlevel [2345]
stop on runlevel [016]
pre-start script
if [ -f "/etc/swift/container-server.conf" ]; then
exec /usr/bin/swift-init container-sync start
else
exit 1
fi
end script
post-stop exec /usr/bin/swift-init container-sync stop
@@ -3,7 +3,7 @@ maintainer "ATT, Inc."
license "Apache 2.0"
description "Installs and configures Openstack Swift"
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
version "7.1.0"
version "1.1.0"
recipe "openstack-object-storage::setup", "Does initial setup of a swift cluster"
recipe "openstack-object-storage::account-server", "Installs the swift account server"
recipe "openstack-object-storage::object-server", "Installs the swift object server"
@@ -62,8 +62,7 @@ def generate_script
# figure out what's present in the cluster
disk_data[which] = {}
role = node["swift"]["#{which}_server_chef_role"]
disk_state,_,_ = Chef::Search::Query.new.search(:node,"chef_environment:#{node.chef_environment} AND roles:#{role}")
disk_state,_,_ = Chef::Search::Query.new.search(:node,"chef_environment:#{node.chef_environment} AND roles:swift-#{which}-server")
# for a running track of available disks
disk_data[:available] ||= {}
@@ -196,24 +195,24 @@ def parse_ring_output(ring_data)
next
elsif line =~ /^\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+\.\d+\.\d+\.\d+)\s+(\d+)\s+(\S+)\s+([0-9.]+)\s+(\d+)\s+([-0-9.]+)\s*$/
output[:hosts] ||= {}
output[:hosts][$4] ||= {}
output[:hosts][$3] ||= {}
output[:hosts][$4][$6] ||= {}
output[:hosts][$3][$5] = {}
output[:hosts][$4][$6][:id] = $1
output[:hosts][$4][$6][:region] = $2
output[:hosts][$4][$6][:zone] = $3
output[:hosts][$4][$6][:ip] = $4
output[:hosts][$4][$6][:port] = $5
output[:hosts][$4][$6][:device] = $6
output[:hosts][$4][$6][:weight] = $7
output[:hosts][$4][$6][:partitions] = $8
output[:hosts][$4][$6][:balance] = $9
output[:hosts][$3][$5][:id] = $1
output[:hosts][$3][$5][:region] = $2
output[:hosts][$3][$5][:zone] = $3
output[:hosts][$3][$5][:ip] = $4
output[:hosts][$3][$5][:port] = $5
output[:hosts][$3][$5][:device] = $6
output[:hosts][$3][$5][:weight] = $7
output[:hosts][$3][$5][:partitions] = $8
output[:hosts][$3][$5][:balance] = $9
elsif line =~ /^\s+(\d+)\s+(\d+)\s+(\d+\.\d+\.\d+\.\d+)\s+(\d+)\s+(\S+)\s+([0-9.]+)\s+(\d+)\s+([-0-9.]+)\s*$/
output[:hosts] ||= {}
output[:hosts][$3] ||= {}
output[:hosts][$3][$5] ||= {}
output[:hosts][$3][$5] = {}
output[:hosts][$3][$5][:id] = $1
output[:hosts][$3][$5][:zone] = $2
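The capture-index change above is easier to see against a sample row; a hedged Perl sketch follows (the row layout is inferred from the regex itself, not from real swift-ring-builder output):

use strict;
use warnings;
my $line = '  1  1  3  10.0.0.5  6000  sdb1  100.00  4096  -0.02';
if ($line =~ /^\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+\.\d+\.\d+\.\d+)\s+(\d+)\s+(\S+)\s+([0-9.]+)\s+(\d+)\s+([-0-9.]+)\s*$/) {
    # with a region column present, $4 is the ip and $6 the device;
    # keying hosts on $3/$5 instead picks up the zone and port columns
    print "id=$1 region=$2 zone=$3 ip=$4 port=$5 device=$6\n";
}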
@@ -23,39 +23,11 @@ end
include_recipe 'sysctl::default'
#-------------
# stats
#-------------
# optionally statsd daemon for stats collection
if node["swift"]["statistics"]["enabled"]
node.set['statsd']['relay_server'] = true
if node["swift"]["enable_statistics"]
include_recipe 'statsd::server'
end
# find graphing server address
if Chef::Config[:solo] and not node['recipes'].include?("chef-solo-search")
Chef::Log.warn("This recipe uses search. Chef Solo does not support search.")
graphite_servers = []
else
graphite_servers = search(:node, "roles:#{node['swift']['statistics']['graphing_role']} AND chef_environment:#{node.chef_environment}")
end
graphite_host = "127.0.0.1"
unless graphite_servers.empty?
graphite_host = graphite_servers[0]['network']["ipaddress_#{node['swift']['statistics']['graphing_interface']}"]
end
if node['swift']['statistics']['graphing_ip'].nil?
node.set['statsd']['graphite_host'] = graphite_host
else
node.set['statsd']['graphite_host'] = node['swift']['statistics']['graphing_ip']
end
#--------------
# swift common
#--------------
platform_options = node["swift"]["platform"]
# update repository if requested with the ubuntu cloud
@@ -91,31 +91,3 @@ template "/etc/swift/container-server.conf" do
notifies :restart, "service[swift-container-updater]", :immediately
notifies :restart, "service[swift-container-auditor]", :immediately
end
# Ubuntu 12.04 packages are missing the swift-container-sync service scripts
# See https://bugs.launchpad.net/cloud-archive/+bug/1250171
if platform?("ubuntu")
cookbook_file "/etc/init/swift-container-sync.conf" do
owner "root"
group "root"
mode "0755"
source "swift-container-sync.conf.upstart"
action :create
not_if "[ -e /etc/init/swift-container-sync.conf ]"
end
link "/etc/init.d/swift-container-sync" do
to "/lib/init/upstart-job"
not_if "[ -e /etc/init.d/swift-container-sync ]"
end
end
service_name=platform_options["service_prefix"] + 'swift-container-sync' + platform_options["service_suffix"]
unless node["swift"]["container-server"]["allowed_sync_hosts"] == []
service "swift-container-sync" do
service_name service_name
provider platform_options["service_provider"]
supports :status => false, :restart => true
action [:enable, :start]
only_if "[ -e /etc/swift/container-server.conf ] && [ -e /etc/swift/container.ring.gz ]"
end
end
@@ -26,29 +26,10 @@ include_recipe "openstack-object-storage::ring-repo"
platform_options = node["swift"]["platform"]
if node["swift"]["authmode"] == "swauth"
case node["swift"]["swauth_source"]
when "package"
platform_options["swauth_packages"].each do |pkg|
package pkg do
action :install
options platform_options["override_options"]
end
end
when "git"
git "#{Chef::Config[:file_cache_path]}/swauth" do
repository node["swift"]["swauth_repository"]
revision node["swift"]["swauth_version"]
action :sync
end
bash "install_swauth" do
cwd "#{Chef::Config[:file_cache_path]}/swauth"
user "root"
group "root"
code <<-EOH
python setup.py install
EOH
environment 'PREFIX' => "/usr/local"
platform_options["swauth_packages"].each.each do |pkg|
package pkg do
action :install
options platform_options["override_options"] # retain configs
end
end
end
@@ -63,19 +44,6 @@ else
auth_key = swift_secrets['dispersion_auth_key']
end
if node['swift']['statistics']['enabled']
template platform_options["swift_statsd_publish"] do
source "swift-statsd-publish.py.erb"
owner "root"
group "root"
mode "0755"
end
cron "cron_swift_statsd_publish" do
command "#{platform_options['swift_statsd_publish']} > /dev/null 2>&1"
minute "*/#{node["swift"]["statistics"]["report_frequency"]}"
end
end
template "/etc/swift/dispersion.conf" do
source "dispersion.conf.erb"
owner "swift"
@@ -94,17 +94,6 @@ template "/etc/swift/object-server.conf" do
notifies :restart, "service[swift-object-auditor]", :immediately
end
%w[ /var/swift /var/swift/recon ].each do |path|
directory path do
# Create the swift recon cache directory and set its permissions.
owner "swift"
group "swift"
mode 00755
action :create
end
end
cron "swift-recon" do
minute "*/5"
command "swift-recon-cron /etc/swift/object-server.conf"
@@ -26,8 +26,7 @@ end
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-object-storage::setup")
Chef::Log.info("I ran the openstack-object-storage::setup so I will use my own swift passwords")
else
setup_role = node["swift"]["setup_chef_role"]
setup = search(:node, "chef_environment:#{node.chef_environment} AND roles:#{setup_role}")
setup = search(:node, "chef_environment:#{node.chef_environment} AND roles:swift-setup")
if setup.length == 0
Chef::Application.fatal! "You must have run the openstack-object-storage::setup recipe (on this or another node) before running the swift::proxy recipe on this node"
elsif setup.length == 1
@@ -48,35 +47,11 @@ platform_options["proxy_packages"].each do |pkg|
end
end
if node["swift"]["authmode"] == "swauth"
case node["swift"]["swauth_source"]
when "package"
platform_options["swauth_packages"].each do |pkg|
package pkg do
action :install
options platform_options["override_options"]
end
end
when "git"
git "#{Chef::Config[:file_cache_path]}/swauth" do
repository node["swift"]["swauth_repository"]
revision node["swift"]["swauth_version"]
action :sync
end
bash "install_swauth" do
cwd "#{Chef::Config[:file_cache_path]}/swauth"
user "root"
group "root"
code <<-EOH
python setup.py install
EOH
environment 'PREFIX' => "/usr/local"
end
end
package "python-swauth" do
action :install
only_if { node["swift"]["authmode"] == "swauth" }
end
package "python-swift-informant" do
action :install
only_if { node["swift"]["use_informant"] }
@@ -109,8 +84,7 @@ if Chef::Config[:solo]
memcache_servers = [ "127.0.0.1:11211" ]
else
memcache_servers = []
proxy_role = node["swift"]["proxy_server_chef_role"]
proxy_nodes = search(:node, "chef_environment:#{node.chef_environment} AND roles:#{proxy_role}")
proxy_nodes = search(:node, "chef_environment:#{node.chef_environment} AND roles:swift-proxy-server")
proxy_nodes.each do |proxy|
proxy_ip = locate_ip_in_cidr(node["swift"]["network"]["proxy-cidr"], proxy)
next if not proxy_ip # skip nil ips so we don't break the config
@@ -127,19 +101,6 @@ else
authkey = swift_secrets['swift_authkey']
end
if node["swift"]["authmode"] == "keystone"
openstack_identity_bootstrap_token = secret "secrets", "openstack_identity_bootstrap_token"
%w[ /home/swift /home/swift/keystone-signing ].each do |path|
directory path do
owner "swift"
group "swift"
mode 00700
action :create
end
end
end
# create proxy config file
template "/etc/swift/proxy-server.conf" do
source "proxy-server.conf.erb"
@@ -147,7 +108,6 @@ template "/etc/swift/proxy-server.conf" do
group "swift"
mode "0600"
variables("authmode" => node["swift"]["authmode"],
"openstack_identity_bootstrap_token" => openstack_identity_bootstrap_token,
"bind_host" => node["swift"]["network"]["proxy-bind-ip"],
"bind_port" => node["swift"]["network"]["proxy-bind-port"],
"authkey" => authkey,
@@ -22,8 +22,7 @@ include_recipe "openstack-object-storage::common"
if Chef::Config[:solo]
Chef::Application.fatal! "This recipe uses search. Chef Solo does not support search."
else
setup_role = node["swift"]["setup_chef_role"]
setup_role_count = search(:node, "chef_environment:#{node.chef_environment} AND roles:#{setup_role}").length
setup_role_count = search(:node, "chef_environment:#{node.chef_environment} AND roles:swift-setup").length
if setup_role_count > 1
Chef::Application.fatal! "You can only have one node with the swift-setup role"
end
@@ -43,32 +42,9 @@ platform_options["proxy_packages"].each do |pkg|
end
end
if node["swift"]["authmode"] == "swauth"
case node["swift"]["swauth_source"]
when "package"
platform_options["swauth_packages"].each do |pkg|
package pkg do
action :upgrade
options platform_options["override_options"]
end
end
when "git"
git "#{Chef::Config[:file_cache_path]}/swauth" do
repository node["swift"]["swauth_repository"]
revision node["swift"]["swauth_version"]
action :sync
end
bash "install_swauth" do
cwd "#{Chef::Config[:file_cache_path]}/swauth"
user "root"
group "root"
code <<-EOH
python setup.py install
EOH
environment 'PREFIX' => "/usr/local"
end
end
package "python-swauth" do
action :upgrade
only_if { node["swift"]["authmode"] == "swauth" }
end
package "python-swift-informant" do
@@ -14,12 +14,12 @@ describe 'openstack-object-storage::common' do
@node = @chef_run.node
@node.set['platform_family'] = "debian"
@node.set['lsb']['codename'] = "precise"
@node.set['swift']['release'] = "grizzly"
@node.set['swift']['release'] = "folsom"
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['git_builder_ip'] = '10.0.0.10'
# TODO: this does not work
# ::Chef::Log.should_receive(:info).with("chefspec: precise-updates/grizzly")
# ::Chef::Log.should_receive(:info).with("chefspec: precise-updates/folsom")
@chef_run.converge "openstack-object-storage::common"
end
@@ -16,8 +16,6 @@ describe 'openstack-object-storage::container-server' do
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['network']['container-bind-ip'] = '10.0.0.1'
@node.set['swift']['network']['container-bind-port'] = '8080'
@node.set['swift']['container-server']['allowed_sync_hosts'] = ['host1', 'host2', 'host3']
@node.set['swift']['container-bind-port'] = '8080'
@node.set['swift']['disk_enum_expr'] = "[{ 'sda' => {}}]"
@node.set['swift']['disk_test_filter'] = [ "candidate =~ /sd[^a]/ or candidate =~ /hd[^a]/ or candidate =~ /vd[^a]/ or candidate =~ /xvd[^a]/",
"File.exist?('/dev/' + candidate)",
@@ -35,7 +33,7 @@ describe 'openstack-object-storage::container-server' do
end
it "starts swift container services on boot" do
%w{swift-container swift-container-auditor swift-container-replicator swift-container-updater swift-container-sync}.each do |svc|
%w{swift-container swift-container-auditor swift-container-replicator swift-container-updater}.each do |svc|
expect(@chef_run).to set_service_to_start_on_boot svc
end
end
@@ -54,34 +52,12 @@ describe 'openstack-object-storage::container-server' do
expect(sprintf("%o", @file.mode)).to eq "600"
end
it "has allowed sync hosts" do
expect(@chef_run).to create_file_with_content @file.name,
"allowed_sync_hosts = host1,host2,host3"
it "template contents" do
pending "TODO: implement"
end
end
it "should create container sync upstart conf for ubuntu" do
expect(@chef_run).to create_cookbook_file "/etc/init/swift-container-sync.conf"
end
it "should create container sync init script for ubuntu" do
expect(@chef_run).to create_link "/etc/init.d/swift-container-sync"
end
describe "/etc/swift/container-server.conf" do
before do
@node = @chef_run.node
@node.set["swift"]["container-server"]["allowed_sync_hosts"] = []
@chef_run.converge "openstack-object-storage::container-server"
@file = @chef_run.template "/etc/swift/container-server.conf"
end
it "has no allowed_sync_hosts on empty lists" do
expect(@chef_run).not_to create_file_with_content @file.name,
/^allowed_sync_hosts =/
end
end
end
end
@@ -14,7 +14,7 @@ describe 'openstack-object-storage::disks' do
@node = @chef_run.node
@node.set['platform_family'] = "debian"
@node.set['lsb']['codename'] = "precise"
@node.set['swift']['release'] = "grizzly"
@node.set['swift']['release'] = "folsom"
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['git_builder_ip'] = '10.0.0.10'
@node.set['swift']['disk_enum_expr'] = "[{ 'sda' => {}}]"
@@ -14,9 +14,6 @@ describe 'openstack-object-storage::management-server' do
@node = @chef_run.node
@node.set['lsb']['code'] = 'precise'
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['statistics']['enabled'] = true
@node.set['swift']['swauth_source'] = 'package'
@node.set['swift']['platform']['swauth_packages'] = ['swauth']
@chef_run.converge "openstack-object-storage::management-server"
end
@@ -45,27 +42,6 @@ describe 'openstack-object-storage::management-server' do
end
describe "/usr/local/bin/swift-statsd-publish.py" do
before do
@file = @chef_run.template "/usr/local/bin/swift-statsd-publish.py"
end
it "has proper owner" do
expect(@file).to be_owned_by "root", "root"
end
it "has proper modes" do
expect(sprintf("%o", @file.mode)).to eq "755"
end
it "has expected statsd host" do
expect(@chef_run).to create_file_with_content @file.name,
"self.statsd_host = '127.0.0.1'"
end
end
end
end
@@ -14,8 +14,6 @@ describe 'openstack-object-storage::proxy-server' do
@node = @chef_run.node
@node.set['lsb']['code'] = 'precise'
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['platform']['swauth_packages'] = ['swauth']
@node.set['swift']['swauth_source'] = 'package'
@node.set['swift']['network']['proxy-bind-ip'] = '10.0.0.1'
@node.set['swift']['network']['proxy-bind-port'] = '8080'
@chef_run.converge "openstack-object-storage::proxy-server"
@@ -30,7 +28,7 @@ describe 'openstack-object-storage::proxy-server' do
end
it "installs swauth package if swauth is selected" do
expect(@chef_run).to install_package "swauth"
expect(@chef_run).to install_package "python-swauth"
end
it "starts swift-proxy on boot" do
@@ -14,7 +14,7 @@ describe 'openstack-object-storage::ring-repo' do
@node = @chef_run.node
@node.set['platform_family'] = "debian"
@node.set['lsb']['codename'] = "precise"
@node.set['swift']['release'] = "grizzly"
@node.set['swift']['release'] = "folsom"
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['git_builder_ip'] = '10.0.0.10'
@chef_run.converge "openstack-object-storage::ring-repo"
@@ -14,7 +14,7 @@ describe 'openstack-object-storage::rsync' do
@node = @chef_run.node
@node.set['platform_family'] = "debian"
@node.set['lsb']['codename'] = "precise"
@node.set['swift']['release'] = "grizzly"
@node.set['swift']['release'] = "folsom"
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['git_builder_ip'] = '10.0.0.10'
@chef_run.converge "openstack-object-storage::rsync"
@@ -15,11 +15,11 @@
bind_ip = <%= @bind_ip %>
bind_port = <%= @bind_port %>
workers = 10
<% if node[:swift][:statistics][:enabled] -%>
<% if node[:swift][:enable_statistics] -%>
log_statsd_host = localhost
log_statsd_port = 8125
log_statsd_default_sample_rate = <%= node[:swift][:statistics][:sample_rate] %>
log_statsd_metric_prefix = <%= node[:swift][:statistics][:statsd_prefix] %>.<%= node[:hostname] %>
log_statsd_default_sample_rate = 1
log_statsd_metric_prefix = openstack.swift.<%= node[:hostname] %>
<% end %>
[pipeline:main]
@@ -18,16 +18,12 @@
bind_ip = <%= @bind_ip %>
bind_port = <%= @bind_port %>
workers = 10
<% if node["swift"]["enable_statistics"] -%>
<% if node[:swift][:enable_statistics] -%>
log_statsd_host = localhost
log_statsd_port = 8125
log_statsd_default_sample_rate = 1
log_statsd_metric_prefix = openstack.swift.<%= node["hostname"] %>
<% end -%>
<% if node["swift"]["container-server"]["allowed_sync_hosts"] -%>
allowed_sync_hosts = <%= node["swift"]["container-server"]["allowed_sync_hosts"].join(",") %>
<% end -%>
log_statsd_metric_prefix = openstack.swift.<%= node[:hostname] %>
<% end %>
[pipeline:main]
pipeline = container-server
@@ -81,14 +77,12 @@ use = egg:swift#container
[container-sync]
# You can override the default log routing for this app here (don't use set!):
log_name = <%= node["swift"]["container-server"]["container-sync"]["log_name"] %>
log_facility = <%= node["swift"]["container-server"]["container-sync"]["log_facility"] %>
log_level = <%= node["swift"]["container-server"]["container-sync"]["log_level"] %>
# log_name = container-sync
# log_facility = LOG_LOCAL0
# log_level = INFO
# If you need to use an HTTP Proxy, set it here; defaults to no proxy.
<% if node["swift"]["container-server"]["container-sync"]["sync_proxy"] -%>
sync_proxy = <%= node["swift"]["container-server"]["container-sync"]["sync_proxy"] %>
<% end -%>
# sync_proxy = http://127.0.0.1:8888
# Will sync, at most, each container once per interval
interval = <%= node["swift"]["container-server"]["container-sync"]["interval"] %>
# interval = 300
# Maximum amount of time to spend syncing each container per pass
container_time = <%= node["swift"]["container-server"]["container-sync"]["container_time"] %>
# container_time = 60
@@ -16,11 +16,11 @@
bind_ip = <%= @bind_ip %>
bind_port = <%= @bind_port %>
workers = 10
<% if node[:swift][:statistics][:enabled] -%>
<% if node[:swift][:enable_statistics] -%>
log_statsd_host = localhost
log_statsd_port = 8125
log_statsd_default_sample_rate = <%= node[:swift][:statistics][:sample_rate] %>
log_statsd_metric_prefix = <%= node[:swift][:statistics][:statsd_prefix] %>.<%= node[:hostname] %>
log_statsd_default_sample_rate = 1
log_statsd_metric_prefix = openstack.swift.<%= node[:hostname] %>
<% end %>
[pipeline:main]
@@ -8,23 +8,15 @@ when "swauth"
end
account_management=false
if node[:swift][:authmode] == "swauth" then
if node[:roles].include?("swift-management-server") and node[:swift][:authmode] == "swauth" then
account_management="true"
end
# need to both: 1) add tempurl before auth middleware, 2) set allow_overrides=true
tempurl_toggle=false
if node[:swift][:authmode] == "swauth" and node[:swift][:tempurl][:enabled] == true then
tempurl_toggle = true
pipeline = "tempurl swauth"
end
-%>
# This file is managed by chef. Do not edit it.
#
# Cluster info:
# Auth mode: <%= node[:swift][:authmode] %>
# Management server: <%= node[:roles].include?(node[:swift][:management_server_chef_role]) %>
# Management server: <%= node[:roles].include?("swift-management-server") %>
# Account management enabled: <%= account_management %>
# Auth pipeline: <%= pipeline %>
@@ -46,12 +38,11 @@ end
workers = <%= [ node[:cpu][:total] - 1, 1 ].max %>
bind_ip = <%= @bind_host %>
bind_port = <%= @bind_port %>
user = swift
<% if node[:swift][:statistics][:enabled] -%>
<% if node[:swift][:enable_statistics] -%>
log_statsd_host = localhost
log_statsd_port = 8125
log_statsd_default_sample_rate = <%= node[:swift][:statistics][:sample_rate] %>
log_statsd_metric_prefix = <%= node[:swift][:statistics][:statsd_prefix] %>.<%= node[:hostname] %>
log_statsd_default_sample_rate = 1
log_statsd_metric_prefix = openstack.swift.<%= node[:hostname] %>
<% end %>
@@ -91,7 +82,13 @@ use = egg:swift#proxy
# If set to 'true' authorized accounts that do not yet exist within the Swift
# cluster will be automatically created.
# account_autocreate = false
allow_account_management = <%= account_management %>
######
#
# N.B. ideally allow_account_management would only be set on the
# management server, but swauth will delete using the cluster url
# and not the local url
# allow_account_management = <%= account_management %>
allow_account_management = true
<% if @authmode == "keystone" -%>
account_autocreate = true
@@ -109,12 +106,6 @@ default_swift_cluster = local#<%= node[:swift][:swift_url] %>#<%= node[:swift][:
<% else %>
default_swift_cluster = local#<%= node[:swift][:swift_url] %>
<% end %>
<% if tempurl_toggle -%>
allow_overrides = true
<% end %>
<% end %>
<% if node["swift"]["container-server"]["allowed_sync_hosts"] -%>
allowed_sync_hosts = <%= node["swift"]["container-server"]["allowed_sync_hosts"].join(",") %>
<% end %>
[filter:healthcheck]
@@ -138,10 +129,7 @@ use = egg:swift#memcache
# commas, as in: 10.1.2.3:11211,10.1.2.4:11211
# memcache_servers = 127.0.0.1:11211
#####
#memcache_servers = <%= @memcache_servers.join(",") %>
<% unless @memcache_servers.empty? -%>
memcache_servers = <%= @memcache_servers %>
<% end -%>
memcache_servers = <%= @memcache_servers.join(",") %>
[filter:ratelimit]
use = egg:swift#ratelimit
@@ -250,7 +238,7 @@ use = egg:swift#tempurl
use = egg:swift#formpost
[filter:keystoneauth]
operator_roles = Member,admin,swiftoperator
operator_roles = Member,admin
use = egg:swift#keystoneauth
[filter:proxy-logging]
@@ -265,31 +253,10 @@ use = egg:swift#proxy_logging
# You can use log_statsd_* from [DEFAULT] or override them here:
# access_log_statsd_host = localhost
# access_log_statsd_port = 8125
# access_log_statsd_default_sample_rate = <%= node[:swift][:statistics][:sample_rate] %>
# access_log_statsd_default_sample_rate = 1
# access_log_statsd_metric_prefix =
# access_log_headers = False
# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY
[filter:authtoken]
<% case @authmode
when "keystone" -%>
paste.filter_factory = keystone.middleware.auth_token:filter_factory
# usage for anonymous referrers ('.r:*')
delay_auth_decision = true
#
signing_dir = /home/swift/keystone-signing
auth_protocol = http
auth_port = 35357
auth_host = <%= node["swift"]["network"]["proxy-bind-ip"] %>
admin_token = <%= @openstack_identity_bootstrap_token %>
# the service tenant and swift userid and password created in Keystone
admin_tenant_name = service
admin_user = swift
admin_password = swift
<% end -%>
@@ -3,7 +3,7 @@ gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = <%= @storage_local_net_ip %>
address = 0.0.0.0
[account]
max connections = 10
@@ -1,157 +0,0 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
#
# Author: Alan Meadows <alan.meadows@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
THIS FILE WAS INSTALLED BY CHEF. ANY CHANGES WILL BE OVERWRITTEN.
OpenStack Swift collector for recon and dispersion reports. Sends
dispersion-report metrics as well as Swift recon statistics
to a statsd server for Graphite consumption.
"""
from subprocess import Popen, PIPE, check_call
from socket import socket, AF_INET, SOCK_DGRAM
import re
import os
try:
import json
json # workaround for pyflakes issue #13
except ImportError:
import simplejson as json
class OpenStackSwiftStatisticsCollector(object):
def __init__(self):
'''Setup some initial values defined by chef'''
self.statsd_host = '<%= node[:swift][:statistics][:statsd_host] %>'
self.statsd_port = <%= node[:swift][:statistics][:statsd_port] %>
self.statsd_prefix = '<%= node[:swift][:statistics][:statsd_prefix] %>'
<% if node[:swift][:statistics][:enable_dispersion_report] -%>
self.enable_dispersion_report = True
<% else %>
self.enable_dispersion_report = False
<% end %>
<% if node[:swift][:statistics][:enable_recon_report] -%>
self.enable_recon_report = True
<% else %>
self.enable_recon_report = False
<% end %>
<% if node[:swift][:statistics][:enable_disk_report] -%>
self.enable_disk_report = True
<% else %>
self.enable_disk_report = False
<% end %>
self.recon_account_cache = '<%= node[:swift][:statistics][:recon_account_cache] %>'
self.recon_container_cache = '<%= node[:swift][:statistics][:recon_container_cache] %>'
self.recon_object_cache = '<%= node[:swift][:statistics][:recon_object_cache] %>'
def _dispersion_report(self):
"""
Swift Dispersion Report Collection
"""
p = Popen(['/usr/bin/swift-dispersion-report', '-j'],
stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
self.publish('%s.dispersion.errors' % self.statsd_prefix, len(stderr.split('\n')) - 1)
data = json.loads(stdout)
for t in ('object', 'container'):
for (k, v) in data[t].items():
self.publish('%s.dispersion.%s.%s' % (self.statsd_prefix, t, k), v)
def _recon_report(self):
"""
Swift Recon Collection
"""
recon_cache = {'account': self.recon_account_cache,
'container': self.recon_container_cache,
'object': self.recon_object_cache}
for recon_type in recon_cache:
if not os.access(recon_cache[recon_type], os.R_OK):
continue
try:
f = open(recon_cache[recon_type])
try:
rmetrics = json.loads(f.readlines()[0].strip())
metrics = self._process_cache(rmetrics)
for k, v in metrics:
metric_name = '%s.%s.%s' % (self.statsd_prefix, recon_type, ".".join(k))
if isinstance(v, (int, float)):
self.publish(metric_name, v)
except (ValueError, IndexError):
continue
finally:
f.close()
def _disk_report(self):
"""
Swift Disk Capacity Report
"""
p = Popen(['/usr/bin/swift-recon', '-d'],
stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
used, total = 0, 0
match = re.search(r'.* space used: ([0-9]*\.?[0-9]+) of ([0-9]*\.?[0-9]+)', stdout, re.M|re.I)
if match:
used, total = [int(float(i)) for i in match.groups()]  # values may be reported as floats
highest, avg = 0, 0
match = re.search(r'.* lowest:.+highest: ([0-9]*\.?[0-9]+)%, avg: ([0-9]*\.?[0-9]+)%', stdout, re.M|re.I)
if match:
highest, avg = match.groups()
self.publish('%s.capacity.bytes_used' % self.statsd_prefix, used)
self.publish('%s.capacity.bytes_free' % self.statsd_prefix, total-used)
# use float division so integer byte counts don't truncate to zero
self.publish('%s.capacity.bytes_utilization' % self.statsd_prefix, int((float(used) / total) * 100) if total else 0)
self.publish('%s.capacity.single_disk_utilization_highest' % self.statsd_prefix, highest)
self.publish('%s.capacity.single_disk_utilization_average' % self.statsd_prefix, avg)
def collect(self):
if (self.enable_dispersion_report):
self._dispersion_report()
if (self.enable_recon_report):
self._recon_report()
if (self.enable_disk_report):
self._disk_report()
def publish(self, metric_name, value):
"""Publish a metric to statsd server"""
# TODO: IPv6 support
print '%s:%s|g' % (metric_name.encode('utf-8'), value), (self.statsd_host, self.statsd_port)
udp_sock = socket(AF_INET, SOCK_DGRAM)
udp_sock.sendto('%s:%s|g' % (metric_name.encode('utf-8'), value), (self.statsd_host, self.statsd_port))
def _process_cache(self, d, path=()):
"""Recusively walk a nested recon cache dict to obtain path/values"""
metrics = []
for k, v in d.iteritems():
if not isinstance(v, dict):
metrics.append((path + (k,), v))
else:
metrics.extend(self._process_cache(v, path + (k,)))  # keep nested metrics instead of discarding the recursion's result
return metrics
if __name__ == '__main__':
collector = OpenStackSwiftStatisticsCollector()
collector.collect()
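The publish() method above speaks the plain statsd gauge wire format, "<metric>:<value>|g", over UDP; a minimal sketch of sending the same datagram from Ruby follows (metric name, host, and port are illustrative defaults, not taken from the script):

```ruby
# Hedged sketch (not part of the cookbook): emit one statsd gauge datagram.
require 'socket'

sock = UDPSocket.new
metric = 'openstack.swift.capacity.bytes_used'      # illustrative metric name
sock.send("#{metric}:1024|g", 0, '127.0.0.1', 8125) # assumed statsd host/port
sock.close
```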
@@ -66,13 +66,6 @@ rabbitmq_user "add openstack rabbit user" do
action :add
end
rabbitmq_user "change the password of the openstack rabbit user" do
user user
password pass
action :change_password
end
rabbitmq_vhost "add openstack rabbit vhost" do
vhost vhost
@@ -1 +0,0 @@
metadata
@@ -1,28 +0,0 @@
{
"sources": {
"statsd": {
"path": "."
},
"build-essential": {
"locked_version": "1.4.2"
},
"git": {
"locked_version": "2.6.0"
},
"dmg": {
"locked_version": "2.0.0"
},
"yum": {
"locked_version": "2.3.2"
},
"windows": {
"locked_version": "1.10.0"
},
"chef_handler": {
"locked_version": "1.1.4"
},
"runit": {
"locked_version": "1.2.0"
}
}
}
@@ -1,8 +0,0 @@
source "https://rubygems.org"
gem "chef", "~> 11.4.4"
gem "json", "<= 1.7.7" # chef dependency
gem "berkshelf", "~> 2.0.10"
gem "chefspec", "~> 1.2.0"
gem "foodcritic"
gem "strainer"
@@ -1,212 +0,0 @@
GEM
remote: https://rubygems.org/
specs:
activesupport (3.2.14)
i18n (~> 0.6, >= 0.6.4)
multi_json (~> 1.0)
addressable (2.3.5)
akami (1.2.0)
gyoku (>= 0.4.0)
nokogiri (>= 1.4.0)
berkshelf (2.0.10)
activesupport (~> 3.2.0)
addressable (~> 2.3.4)
buff-shell_out (~> 0.1)
chozo (>= 0.6.1)
faraday (>= 0.8.5)
hashie (>= 2.0.2)
minitar (~> 0.5.4)
rbzip2 (~> 0.2.0)
retryable (~> 1.3.3)
ridley (~> 1.5.0)
solve (>= 0.5.0)
thor (~> 0.18.0)
buff-config (0.4.0)
buff-extensions (~> 0.3)
varia_model (~> 0.1)
buff-extensions (0.5.0)
buff-ignore (1.1.0)
buff-platform (0.1.0)
buff-ruby_engine (0.1.0)
buff-shell_out (0.1.0)
buff-ruby_engine (~> 0.1.0)
builder (3.2.2)
celluloid (0.14.1)
timers (>= 1.0.0)
celluloid-io (0.14.1)
celluloid (>= 0.14.1)
nio4r (>= 0.4.5)
chef (11.4.4)
erubis
highline (>= 1.6.9)
json (>= 1.4.4, <= 1.7.7)
mixlib-authentication (>= 1.3.0)
mixlib-cli (~> 1.3.0)
mixlib-config (>= 1.1.2)
mixlib-log (>= 1.3.0)
mixlib-shellout
net-ssh (~> 2.6)
net-ssh-multi (~> 1.1.0)
ohai (>= 0.6.0)
rest-client (>= 1.0.4, < 1.7.0)
yajl-ruby (~> 1.1)
chefspec (1.2.0)
chef (>= 10.0)
erubis
fauxhai (>= 0.1.1, < 2.0)
minitest-chef-handler (>= 0.6.0)
rspec (~> 2.0)
chozo (0.6.1)
activesupport (>= 3.2.0)
hashie (>= 2.0.2)
multi_json (>= 1.3.0)
ci_reporter (1.9.0)
builder (>= 2.1.2)
diff-lcs (1.2.4)
erubis (2.7.0)
faraday (0.8.8)
multipart-post (~> 1.2.0)
fauxhai (1.1.1)
httparty
net-ssh
ohai
ffi (1.9.0)
foodcritic (2.2.0)
erubis
gherkin (~> 2.11.7)
nokogiri (~> 1.5.4)
treetop (~> 1.4.10)
yajl-ruby (~> 1.1.0)
gherkin (2.11.8)
multi_json (~> 1.3)
gssapi (1.0.3)
ffi (>= 1.0.1)
gyoku (1.1.0)
builder (>= 2.1.2)
hashie (2.0.5)
highline (1.6.19)
httparty (0.11.0)
multi_json (~> 1.0)
multi_xml (>= 0.5.2)
httpclient (2.2.0.2)
httpi (0.9.7)
rack
i18n (0.6.5)
ipaddress (0.8.0)
json (1.7.7)
little-plugger (1.1.3)
logging (1.6.2)
little-plugger (>= 1.1.3)
mime-types (1.25)
minitar (0.5.4)
minitest (4.7.5)
minitest-chef-handler (1.0.1)
chef
ci_reporter
minitest (~> 4.7.3)
mixlib-authentication (1.3.0)
mixlib-log
mixlib-cli (1.3.0)
mixlib-config (1.1.2)
mixlib-log (1.6.0)
mixlib-shellout (1.2.0)
multi_json (1.7.9)
multi_xml (0.5.5)
multipart-post (1.2.0)
net-http-persistent (2.9)
net-ssh (2.6.8)
net-ssh-gateway (1.2.0)
net-ssh (>= 2.6.5)
net-ssh-multi (1.1)
net-ssh (>= 2.1.4)
net-ssh-gateway (>= 0.99.0)
nio4r (0.5.0)
nokogiri (1.5.10)
nori (1.1.5)
ohai (6.18.0)
ipaddress
mixlib-cli
mixlib-config
mixlib-log
mixlib-shellout
systemu
yajl-ruby
polyglot (0.3.3)
rack (1.5.2)
rbzip2 (0.2.0)
rest-client (1.6.7)
mime-types (>= 1.16)
retryable (1.3.3)
ridley (1.5.2)
addressable
buff-config (~> 0.2)
buff-extensions (~> 0.3)
buff-ignore (~> 1.1)
buff-shell_out (~> 0.1)
celluloid (~> 0.14.0)
celluloid-io (~> 0.14.0)
erubis
faraday (>= 0.8.4)
hashie (>= 2.0.2)
json (>= 1.7.7)
mixlib-authentication (>= 1.3.0)
net-http-persistent (>= 2.8)
net-ssh
nio4r (>= 0.5.0)
retryable
solve (>= 0.4.4)
varia_model (~> 0.1)
winrm (~> 1.1.0)
rspec (2.14.1)
rspec-core (~> 2.14.0)
rspec-expectations (~> 2.14.0)
rspec-mocks (~> 2.14.0)
rspec-core (2.14.5)
rspec-expectations (2.14.2)
diff-lcs (>= 1.1.3, < 2.0)
rspec-mocks (2.14.3)
rubyntlm (0.1.1)
savon (0.9.5)
akami (~> 1.0)
builder (>= 2.1.2)
gyoku (>= 0.4.0)
httpi (~> 0.9)
nokogiri (>= 1.4.0)
nori (~> 1.0)
wasabi (~> 1.0)
solve (0.8.1)
strainer (3.3.0)
berkshelf (~> 2.0)
buff-platform (~> 0.1)
systemu (2.5.2)
thor (0.18.1)
timers (1.1.0)
treetop (1.4.15)
polyglot
polyglot (>= 0.3.1)
uuidtools (2.1.4)
varia_model (0.2.0)
buff-extensions (~> 0.2)
hashie (>= 2.0.2)
wasabi (1.0.0)
nokogiri (>= 1.4.0)
winrm (1.1.2)
gssapi (~> 1.0.0)
httpclient (~> 2.2.0.2)
logging (~> 1.6.1)
nokogiri (~> 1.5.0)
rubyntlm (~> 0.1.1)
savon (= 0.9.5)
uuidtools (~> 2.1.2)
yajl-ruby (1.1.0)
PLATFORMS
ruby
DEPENDENCIES
berkshelf (~> 2.0.10)
chef (~> 11.4.4)
chefspec (~> 1.2.0)
foodcritic
json (<= 1.7.7)
strainer
@@ -1,53 +1,69 @@
Description
===========
# DESCRIPTION
Installs and sets up statsd <http://github.com/etsy/statsd>
Chef cookbook to install [Etsy's
StatsD](https://github.com/etsy/statsd) daemon. Supports the new
pluggable backend modules.
Requirements
============
# REQUIREMENTS
Ubuntu 12.04
Depends on the cookbooks:
Attributes
==========
* git
* nodejs
* `node['statsd']['port']` - The port for Statsd to listen for stats on. Defaults to 8125
* `node['statsd']['graphite_host']` - The host to forward processed statistics to. Defaults to localhost.
* `node['statsd']['graphite_port']` - The port to forward processed statistics to. Defaults to 2003
* `node['statsd']['package_version']` - The version to use when creating the package. Defaults to 0.6.0
* `node['statsd']['tmp_dir']` - The temporary directory to use while building the package. Defaults to /tmp
* `node['statsd']['repo']` - The git repo to use. Defaults to "git://github.com/etsy/statsd.git"
* `node['statsd']['sha']` - The sha checksum of the repo to use
# ATTRIBUTES
Usage
=====
## Basic attributes
Including this recipe will build a dpkg from the statsd git repository and install it.
* `repo`: Location of statsd repo (defaults to Etsy's).
* `log_file`: Where to log output (defaults to:
`/var/log/statsd.log`).
* `flush_interval_msecs`: Flush interval in msecs (default 10000).
* `port`: Port to listen for UDP stats (default 8125).
By default statsd will attempt to send statistics to a graphite instance running on localhost.
## Graphite settings
Testing
=======
* `graphite_enabled`: Enable the built-in Graphite backend (default true).
* `graphite_port`: Port to talk to Graphite on (default 2003).
* `graphite_host`: Host name of Graphite server (default localhost).
$ bundle install
$ bundle exec berks install
$ bundle exec strainer test
## Adding backends
License and Author
==================
Set the attribute `backends` to a hash of statsd NPM module
backends. The hash key is the name of the NPM module, while the hash
value is the version of the NPM module to install (or null for latest
version).
Author:: Scott Lampert (<sl724q@att.com>)
For example, to use version 0.0.1 of [statsd-librato-backend][]:
Copyright 2012-2013, AT&T Services, Inc.
attrs[:statsd][:backends] = { 'statsd-librato-backend' => '0.0.1' }
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
To use the latest version of statsd-librato-backend:
http://www.apache.org/licenses/LICENSE-2.0
attrs[:statsd][:backends] = { 'statsd-librato-backend' => nil }
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The cookbook will install each backend module under the statsd
directory and add it to the list of backends loaded in the
configuration file.
### Extra backend configuration
Set the attribute `extra_config` to any additional configuration
options that should be included in the StatsD configuration file.
For example, to set your email and token for the
[statsd-librato-backend][] backend module, use the following:
```js
attrs[:statsd][:extra_config] => {
'librato' => {
'email' => 'myemail@example.com',
'token' => '1234567890ABCDEF'
}
}
```
# USAGE
[statsd-librato-backend]: https://github.com/librato/statsd-librato-backend
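As a worked example of the attributes described above (values are the README's own samples), the final configuration is just the core settings merged with `extra_config`; a hedged sketch of that merge:

```ruby
# Illustrative only: mirrors the merge the default recipe performs
# before rendering /etc/statsd/config.js.
require 'json'

base  = { flushInterval: 10_000, port: 8125, backends: ['./backends/graphite'] }
extra = { 'librato' => { 'email' => 'myemail@example.com',
                         'token' => '1234567890ABCDEF' } }
puts JSON.pretty_generate(base.merge(extra)) # core settings plus backend section
```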
@@ -1,4 +0,0 @@
# Strainerfile
knife test: bundle exec knife cookbook test $COOKBOOK
foodcritic: bundle exec foodcritic -f any -t ~FC003 -t ~FC023 $SANDBOX/$COOKBOOK
chefspec: bundle exec rspec $SANDBOX/$COOKBOOK
@@ -1,9 +1,32 @@
default['statsd']['port'] = 8125
default['statsd']['graphite_port'] = 2003
default['statsd']['graphite_host'] = "localhost"
default['statsd']['relay_server'] = false
default['statsd']['package_version'] = "0.6.0"
default['statsd']['sha'] = "2ccde8266bbe941ac5f79efe39103b99e1196d92"
default['statsd']['user'] = "statsd"
default['statsd']['repo'] = "git://github.com/etsy/statsd.git"
default['statsd']['tmp_dir'] = "/tmp"
default[:statsd][:repo] = "git://github.com/etsy/statsd.git"
default[:statsd][:revision] = "master"
default[:statsd][:log_file] = "/var/log/statsd.log"
default[:statsd][:flush_interval_msecs] = 10000
default[:statsd][:port] = 8125
# Is the graphite backend enabled?
default[:statsd][:graphite_enabled] = true
default[:statsd][:graphite_port] = 2003
default[:statsd][:graphite_host] = "localhost"
#
# Add all NPM module backends here. Each entry should map the
# backend's name to the NPM module's version. To use the latest
# published version, set the value to nil.
#
# For example, to use version 0.0.1 of statsd-librato-backend:
#
# attrs[:statsd][:backends] = { 'statsd-librato-backend' => '0.0.1' }
#
# To use the latest version of statsd-librato-backend:
#
# attrs[:statsd][:backends] = { 'statsd-librato-backend' => nil }
#
default[:statsd][:backends] = {}
#
# Add any additional backend configuration here.
#
default[:statsd][:extra_config] = {}
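To make the versioning convention concrete, here is a hedged sketch of how each backends entry resolves to an npm install target (module names are illustrative; the default recipe performs the real installs):

```ruby
# Sketch: resolve backend hash entries to npm install targets.
backends = { 'statsd-librato-backend' => '0.0.1', # pinned -> name@version
             'statsd-http-backend'    => nil }    # nil -> latest published
backends.each do |mod, ver|
  target = ver ? "#{mod}@#{ver}" : mod
  puts "npm install #{target}" # the recipe runs this in /usr/share/statsd
end
```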
@@ -1,8 +1,12 @@
description "statsd"
author "etsy"
author "Librato"
start on startup
stop on shutdown
start on runlevel [2345]
stop on runlevel [!2345]
env SL_NAME=statsd
respawn
script
# We found $HOME is needed. Without it, we ran into problems
@@ -1,3 +0,0 @@
#!/bin/sh
# Called by Upstart, /etc/init/statsd.conf
node /usr/share/statsd/stats.js /etc/statsd/localConfig.js 2>&1 >> /tmp/statsd.log
@@ -1,16 +1,12 @@
name "statsd"
maintainer "AT&T Services, Inc."
maintainer_email "cookbooks@lists.tfoundry.com"
maintainer "Mike Heffner"
maintainer_email "mike@librato.com"
license "Apache 2.0"
description "Installs/Configures statsd"
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
version "0.1.4"
recipe "statsd", "Installs stats ruby gem"
recipe "statsd::server", "Configures statsd server"
version "0.1.1"
%w{ ubuntu }.each do |os|
supports os
end
depends "build-essential"
depends "git"
depends "nodejs", ">= 0.5.2"
depends "build-essential"
depends "git"
supports "ubuntu"
@@ -2,19 +2,109 @@
# Cookbook Name:: statsd
# Recipe:: default
#
# Copyright 2013, Scott Lampert
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2011, Librato, Inc.
#
gem_package "statsd-ruby"
include_recipe "nodejs"
include_recipe "git"
git "/usr/share/statsd" do
repository node[:statsd][:repo]
revision node[:statsd][:revision]
action :sync
end
execute "install dependencies" do
command "npm install -d"
cwd "/usr/share/statsd"
end
backends = []
if node[:statsd][:graphite_enabled]
backends << "./backends/graphite"
end
node[:statsd][:backends].each do |k, v|
if v
name = "#{k}@#{v}"
else
name = k
end
execute "install npm module #{name}" do
command "npm install #{name}"
cwd "/usr/share/statsd"
end
backends << k
end
directory "/etc/statsd" do
action :create
end
user "statsd" do
comment "statsd"
system true
shell "/bin/false"
end
service "statsd" do
provider Chef::Provider::Service::Upstart
restart_command "stop statsd; start statsd"
start_command "start statsd"
stop_command "stop statsd"
supports :restart => true, :start => true, :stop => true
end
template "/etc/statsd/config.js" do
source "config.js.erb"
mode 0644
config_hash = {
:flushInterval => node[:statsd][:flush_interval_msecs],
:port => node[:statsd][:port],
:backends => backends
}.merge(node[:statsd][:extra_config])
if node[:statsd][:graphite_enabled]
config_hash[:graphitePort] = node[:statsd][:graphite_port]
config_hash[:graphiteHost] = node[:statsd][:graphite_host]
end
variables(:config_hash => config_hash)
notifies :restart, resources(:service => "statsd")
end
directory "/usr/share/statsd/scripts" do
action :create
end
template "/usr/share/statsd/scripts/start" do
source "upstart.start.erb"
mode 0755
notifies :restart, resources(:service => "statsd")
end
cookbook_file "/etc/init/statsd.conf" do
source "upstart.conf"
mode 0644
notifies :restart, resources(:service => "statsd")
end
bash "create_log_file" do
code <<EOH
touch #{node[:statsd][:log_file]} && chown statsd #{node[:statsd][:log_file]}
EOH
not_if {File.exist?(node[:statsd][:log_file])}
end
service "statsd" do
action [ :enable, :start ]
end
@@ -1,87 +0,0 @@
#
# Cookbook Name:: statsd
# Recipe:: server
#
# Copyright 2013, Scott Lampert
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include_recipe "build-essential"
include_recipe "git"
case node["platform"]
when "ubuntu", "debian"
package "nodejs"
package "debhelper"
statsd_version = node['statsd']['sha']
git ::File.join(node['statsd']['tmp_dir'], "statsd") do
repository node['statsd']['repo']
reference statsd_version
action :sync
notifies :run, "execute[build debian package]"
end
# Fix the debian changelog file of the repo
template ::File.join(node['statsd']['tmp_dir'], "statsd/debian/changelog") do
source "changelog.erb"
end
execute "build debian package" do
command "dpkg-buildpackage -us -uc"
cwd ::File.join(node['statsd']['tmp_dir'], "statsd")
creates ::File.join(node['statsd']['tmp_dir'], "statsd_#{node['statsd']['package_version']}_all.deb")
end
dpkg_package "statsd" do
action :install
source ::File.join(node['statsd']['tmp_dir'], "statsd_#{node['statsd']['package_version']}_all.deb")
end
when "redhat", "centos"
raise "No support for RedHat or CentOS (yet)."
end
template "/etc/statsd/localConfig.js" do
source "localConfig.js.erb"
mode 00644
notifies :restart, "service[statsd]"
end
cookbook_file "/usr/share/statsd/scripts/start" do
source "upstart.start"
owner "root"
group "root"
mode 00755
end
cookbook_file "/etc/init/statsd.conf" do
source "upstart.conf"
owner "root"
group "root"
mode 00644
end
user node['statsd']['user'] do
comment "statsd"
system true
shell "/bin/false"
end
service "statsd" do
provider Chef::Provider::Service::Upstart
action [ :enable, :start ]
end
@@ -1,5 +0,0 @@
statsd (<%= node['statsd']['package_version'] %>) unstable; urgency=low
* Dummy changelog for dpkg build
-- Scott Lampert <scott@lampert.org> Thu, 14 Mar 2013 15:24:00 -0700
@@ -0,0 +1 @@
<%= JSON.pretty_generate(@config_hash) %>
@@ -1,17 +0,0 @@
/********************
AUTOGENERATED BY CHEF
*********************/
{
graphitePort: <%= node['statsd']['graphite_port'] %>
, graphiteHost: "<%= node['statsd']['graphite_host'] %>"
<% if node['statsd']['relay_server'] -%>
, address: "127.0.0.1"
, mgmt_address: "127.0.0.1"
<% else -%>
, address: "<%= node['statsd']['graphite_host'] %>"
, mgmt_address: "<%=node['statsd']['graphite_host'] %>"
<% end -%>
, port: <%= node['statsd']['port'] %>
, backends: [ "./backends/graphite" ]
}
@@ -0,0 +1,6 @@
#!/bin/bash
# Called by Upstart, /etc/init/statsd.conf
export PATH=$PATH:/usr/local/bin
node /usr/share/statsd/stats.js /etc/statsd/config.js >> <%= node[:statsd][:log_file] %> 2>&1
@@ -1,4 +0,0 @@
{
"id": "ceilometer",
"ceilometer": "xcatcloud"
}
@@ -1,4 +0,0 @@
{
"id": "cinder",
"cinder": "xcatcloud"
}
@@ -1,4 +0,0 @@
{
"id": "glance",
"glance": "xcatcloud"
}
@@ -1,4 +0,0 @@
{
"id": "horizon",
"horizon": "xcatcloud"
}
@@ -1,4 +0,0 @@
{
"id": "keystone",
"keystone": "xcatcloud"
}
@@ -1,4 +0,0 @@
{
"id": "nova",
"nova": "xcatcloud"
}
@@ -1,4 +0,0 @@
{
"id": "quantum",
"quantum": "xcatcloud"
}
@@ -1,4 +0,0 @@
{
"id": "openstack_identity_bootstrap_token",
"openstack_identity_bootstrap_token": "openstack_identity_bootstrap_token"
}
@@ -1,4 +0,0 @@
{
"id": "quantum_metadata_secret",
"quantum_metadata_secret": "quantum_metadata_secret"
}
@@ -1,4 +0,0 @@
{
"id": "openstack-block-storage",
"openstack-block-storage": "xcatcloud"
}
@@ -1,4 +0,0 @@
{
"id": "openstack-compute",
"openstack-compute": "xcatcloud"
}
@@ -1,4 +0,0 @@
{
"id": "openstack-image",
"openstack-image": "xcatcloud"
}
@@ -1,4 +0,0 @@
{
"id": "openstack-network",
"openstack-network": "xcatcloud"
}
@@ -1,4 +0,0 @@
{
"id": "admin",
"admin": "xcatcloud"
}
@@ -1,4 +0,0 @@
{
"id": "guest",
"guest": "xcatcloud"
}
@@ -1,161 +0,0 @@
#
# IBM(c) 2013 EPL license http://www.eclipse.org/legal/epl-v10.html
#
name "example_allinone"
description "Grizzly allinone environment file."
override_attributes(
"mysql" => {
"server_root_password" => "cluster",
"server_debian_password" => "cluster",
"server_repl_password" => "cluster",
"allow_remote_root" => true,
"root_network_acl" => "%"
},
"openstack" => {
"developer_mode" => true,
"secret"=>{
"key_path"=>"/etc/chef/encrypted_data_bag_secret"
},
"db"=>{
"bind_interface"=>"lo",
"compute"=>{
"host"=>"127.0.0.1"
},
"identity"=>{
"host"=>"127.0.0.1"
},
"image"=>{
"host"=>"127.0.0.1"
},
"network"=>{
"host"=>"127.0.0.1"
},
"volume"=>{
"host"=>"127.0.0.1"
},
"dashboard"=>{
"host"=>"127.0.0.1"
},
"metering"=>{
"host"=>"127.0.0.1"
}
},
"mq"=>{
"bind_interface"=>"lo"
},
"identity"=>{
"bind_interface"=>"lo",
"db"=>{
"username"=>"keystone",
"password"=> "keystone"
}
},
"endpoints"=>{
"identity-api"=>{
"host"=>"127.0.0.1",
},
"identity-admin"=>{
"host"=>"127.0.0.1",
},
"compute-api"=>{
"host"=>"127.0.0.1",
},
"compute-ec2-api"=>{
"host"=>"127.0.0.1",
},
"compute-ec2-admin"=>{
"host"=>"127.0.0.1",
},
"compute-xvpvnc"=>{
"host"=>"127.0.0.1",
},
"compute-novnc"=>{
"host"=>"127.0.0.1",
},
"network-api"=>{
"host"=>"127.0.0.1",
},
"image-api"=>{
"host"=>"127.0.0.1",
},
"image-registry"=>{
"host"=>"127.0.0.1",
},
"volume-api"=>{
"host"=>"127.0.0.1",
},
"metering-api"=>{
"host"=>"127.0.0.1",
}
},
"image" => {
"api"=>{
"bind_interface"=>"lo"
},
"registry"=>{
"bind_interface"=>"lo"
},
"image_upload" => false,
"upload_images" => ["cirros"],
"upload_image" => {
"cirros" => "https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img"
},
"identity_service_chef_role" => "allinone-compute"
},
"block-storage" => {
"keystone_service_chef_role" => "allinone-compute"
},
"dashboard" => {
"keystone_service_chef_role" => "allinone-compute",
"use_ssl" => "false"
},
"network" => {
"metadata"=>{
"nova_metadata_ip"=>"127.0.0.1"
},
"rabbit"=>{
"host"=>"127.0.0.1"
},
"api"=>{
"bind_interface"=>"lo"
},
"rabbit_server_chef_role" => "allinone-compute",
"l3"=>{
"external_network_bridge_interface"=>"eth0"
},
"openvswitch"=> {
"tenant_network_type"=>"vlan",
"network_vlan_ranges"=>"physnet1",
"bridge_mappings"=>"physnet1:eth2"
}
},
"compute" => {
"identity_service_chef_role" => "allinone-compute",
"rabbit"=>{
"host"=>"127.0.0.1"
},
"xvpvnc_proxy"=>{
"bind_interface"=>"eth0"
},
"novnc_proxy"=>{
"bind_interface"=>"eth0"
},
"network" => {
"service_type" => "quantum"
},
"config" => {
"ram_allocation_ratio" => 5.0
},
"libvirt" => {
"bind_interface"=>"lo",
"virt_type" => "qemu"
}
}
}
)
@@ -1,156 +0,0 @@
#
# IBM(c) 2013 EPL license http://www.eclipse.org/legal/epl-v10.html
# http://docs.openstack.org/grizzly/openstack-network/admin/content/app_demo_routers_with_private_networks.html
#
#
name "example_per-tenant_router"
description "Grizzly environment file based on Per-tenant Routers with Private Networks"
override_attributes(
"mysql" => {
"server_root_password" => "cluster",
"server_debian_password" => "cluster",
"server_repl_password" => "cluster",
"allow_remote_root" => true,
"root_network_acl" => "%"
},
"openstack" => {
"developer_mode" => true,
"db"=>{
"bind_interface"=>"eth1",
"compute"=>{
"host"=>"11.1.0.107"
},
"identity"=>{
"host"=>"11.1.0.107"
},
"image"=>{
"host"=>"11.1.0.107"
},
"network"=>{
"host"=>"11.1.0.107"
},
"volume"=>{
"host"=>"11.1.0.107"
},
"dashboard"=>{
"host"=>"11.1.0.107"
},
"metering"=>{
"host"=>"11.1.0.107"
}
},
"mq"=>{
"bind_interface"=>"eth1"
},
"identity"=>{
"bind_interface"=>"eth1",
"db"=>{
"username"=>"keystone",
"password"=> "keystone"
}
},
"endpoints"=>{
"identity-api"=>{
"host"=>"11.1.0.107",
},
"identity-admin"=>{
"host"=>"11.1.0.107",
},
"compute-api"=>{
"host"=>"11.1.0.107",
},
"compute-ec2-api"=>{
"host"=>"11.1.0.107",
},
"compute-ec2-admin"=>{
"host"=>"11.1.0.107",
},
"compute-xvpvnc"=>{
"host"=>"11.1.0.107",
},
"compute-novnc"=>{
"host"=>"11.1.0.107",
},
"network-api"=>{
"host"=>"11.1.0.107",
},
"image-api"=>{
"host"=>"11.1.0.107",
},
"image-registry"=>{
"host"=>"11.1.0.107",
},
"volume-api"=>{
"host"=>"11.1.0.107",
},
"metering-api"=>{
"host"=>"11.1.0.107",
}
},
"image" => {
"api"=>{
"bind_interface"=>"eth1"
},
"registry"=>{
"bind_interface"=>"eth1"
},
"image_upload" => false,
"upload_images" => ["cirros"],
"upload_image" => {
"cirros" => "https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img"
},
},
"dashboard" => {
"use_ssl" => "false"
},
"network" => {
"metadata"=>{
"nova_metadata_ip"=>"11.1.0.107"
},
"rabbit"=>{
"host"=>"11.1.0.107"
},
"api"=>{
"bind_interface"=>"eth1"
},
"l3"=>{
"external_network_bridge_interface"=>"eth0"
},
"allow_overlapping_ips" => "True",
"use_namespaces" => "True",
"openvswitch"=> {
"tenant_network_type"=>"gre",
"tunnel_id_ranges"=>"1:1000",
"enable_tunneling"=>"True",
"local_ip_interface"=>"eth2"
}
},
"compute" => {
"identity_service_chef_role" => "os-compute-single-controller",
"rabbit"=>{
"host"=>"11.1.0.107"
},
"xvpvnc_proxy"=>{
"bind_interface"=>"eth0"
},
"novnc_proxy"=>{
"bind_interface"=>"eth0"
},
"network" => {
"service_type" => "quantum",
},
"config" => {
"ram_allocation_ratio" => 5.0
},
"libvirt" => {
"bind_interface"=>"eth1",
"virt_type" => "qemu"
}
}
}
)
@@ -1,6 +0,0 @@
name "os-block-storage-volume"
description "OpenStack Block Storage volume service"
run_list(
"role[os-base]",
"recipe[openstack-block-storage::volume]"
)
@@ -2,7 +2,5 @@ name "os-block-storage"
description "Configures OpenStack block storage, configured by attributes."
run_list(
"role[os-base]",
"role[os-block-storage-api]",
"role[os-block-storage-scheduler]",
"role[os-block-storage-volume]",
"recipe[openstack-block-storage]"
)
@@ -6,7 +6,6 @@ run_list(
"role[os-ops-messaging]",
"role[os-identity]",
"role[os-network-server]",
"role[os-network-dhcp-agent]",
"role[os-compute-scheduler]",
"role[os-compute-api]",
"role[os-compute-cert]",
@@ -2,5 +2,5 @@ name "os-object-storage-account"
description "OpenStack object storage account service"
run_list(
"role[os-base]",
"recipe[openstack-object-storage::account-server]"
"recipe[openstack-object-storage::account]"
)
@@ -2,5 +2,5 @@ name "os-object-storage-container"
description "OpenStack object storage container service"
run_list(
"role[os-base]",
"recipe[openstack-object-storage::container-server]"
"recipe[openstack-object-storage::container]"
)
@@ -2,5 +2,5 @@ name "os-object-storage-management"
description "OpenStack object storage management service"
run_list(
"role[os-base]",
"recipe[openstack-object-storage::management-server]"
"recipe[openstack-object-storage::management]"
)
@@ -2,5 +2,5 @@ name "os-object-storage-object"
description "OpenStack object storage object service"
run_list(
"role[os-base]",
"recipe[openstack-object-storage::object-server]"
"recipe[openstack-object-storage::object]"
)
@@ -2,5 +2,5 @@ name "os-object-storage-proxy"
description "OpenStack object storage proxy service"
run_list(
"role[os-base]",
"recipe[openstack-object-storage::proxy-server]"
"recipe[openstack-object-storage::proxy]"
)
@@ -1,6 +0,0 @@
name "os-object-storage-setup"
description "OpenStack object storage server responsible for generating initial settings"
run_list(
"role[os-base]",
"recipe[openstack-object-storage::setup]"
)
@@ -2,10 +2,5 @@ name "os-object-storage"
description "OpenStack object storage roll-up role"
run_list(
"role[os-base]",
"role[os-object-storage-setup]",
"role[os-object-storage-management]",
"role[os-object-storage-proxy]",
"role[os-object-storage-object]",
"role[os-object-storage-container]",
"role[os-object-storage-account]"
"recipe[openstack-object-storage]"
)
@@ -179,10 +179,6 @@ sub process_request
$callback->($rsp);
next;
}
unless ( -d "$repos/environments") {
mkdir("$repos/environments", 0777);
}
my $tmperr = cloudvars(
$tmplfile,
@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/sh -vx
bridge_name="br-ex"
@@ -19,7 +19,7 @@ str_value=$(hashget hash_defined_nics $pubinterface)
old_ifs=$IFS
IFS=$','
array_temp=($str_value)
IFS=$old_ifs
FS=$old_ifs
if [ -n "${array_temp[1]}" ];then
str_nic_type=`echo ${array_temp[1]} | tr "[A-Z]" "[a-z]"`
@@ -35,27 +35,7 @@ else
fi
str_network=$(checknetwork ${array_temp[0]})
if [ -z "$str_network" ];then
logger -t xcat -p local4.info "configbr-ex: could not find the network for $bridge_name which is based on $pubinterface. Please check the networks and nics tables."
echo "configbr-ex: could not find the network for $bridge_name which is based on $pubinterface. Please check the networks and nics tables."
exit -1
fi
#configeth $bridge_name ${array_temp[0]} ${array_temp[2]}
configeth $bridge_name ${array_temp[0]} $str_network
if [ $? -ne 0 ];then
logger -t xcat -p local4.info "configbr-ex failed to configure $bridge_name : configeth $bridge_name ${array_temp[0]} $str_network"
echo "confignics: configbr-ex failed to configure $bridge_name : configeth $bridge_name ${array_temp[0]} $str_network"
exit -1
fi
. ./configgw $bridge_name
if [ $? -ne 0 ];then
logger -t xcat -p local4.info "configgw failed to configure gateway for $bridge_name."
echo "configgw failed to configure gateway for $bridge_name."
exit -1
fi
exit 0
configeth $bridge_name ${array_temp[0]} ${array_temp[2]}
@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/sh
# IBM(c) 2007 EPL license http://www.eclipse.org/legal/epl-v10.html
# This script, ("loadclouddata"), is a sample xCAT post script for
@@ -38,20 +38,17 @@ hkeys() {
set | grep -o "^HASH${1}[[:alnum:]]*=" | sed -re "s/^HASH${1}(.*)=/\\1/g"
}
HOME='/root/'
export HOME
#flags
run_all=0
no_args=0
only_load_cookbook=0
only_load_role=0
only_load_clouddata=0
# develop mode: 0 -- false (customer mode); 1 -- true (develop mode)
devmode=1
if [ $# -eq 0 ]
then
run_all=1
no_args=1
else
for arg in "$@"
do
@@ -64,30 +61,15 @@ else
elif [ "$arg" = "--clouddata" ]
then
only_load_clouddata=1
elif [ "$arg" = "--nodevmode" ]
then
devmode=0
run_all=1
else
errmsg="no argument $arg in the loadchefdata script"
logger -t xcat -p local4.err "$errmsg"
echo "$errmsg"
logger -t xcat -p local4.err $errmsg
echo $errmsg
exit 1
fi
done
fi
if [ $devmode -eq 0 ]
then
if [ $only_load_cookbook -eq 1 -o $only_load_role -eq 1 -o $only_load_clouddata -eq 1 ]
then
errmsg="'--nodevmode' could not be used with other arguments"
logger -t xcat -p local4.err "$errmsg"
echo "$errmsg"
exit 1
fi
fi
if [ -z $CLOUDLIST ]
then
errmsg="Error! No Cloud name is assigned to the chef-client of the chef-server $NODE. Please check the cloud table."
@@ -107,7 +89,7 @@ then
fi
cd $REPOSITORY
if [ $run_all -eq 1 -o $only_load_cookbook -eq 1 ]
if [ $no_args -eq 1 -o $only_load_cookbook -eq 1 ]
then
# upload coobooks
knife cookbook bulk delete '.*' -y > /dev/null 2>&1
@@ -121,7 +103,7 @@ then
fi
fi
if [ $run_all -eq 1 -o $only_load_role -eq 1 ]
if [ $no_args -eq 1 -o $only_load_role -eq 1 ]
then
# upload roles
knife role bulk delete '.*' -y > /dev/null 2>&1
@@ -137,7 +119,7 @@ then
fi
if [ $run_all -eq 1 -o $only_load_clouddata -eq 1 ]
if [ $no_args -eq 1 -o $only_load_clouddata -eq 1 ]
then
if [ -z $CFGCLIENTLIST ]
@@ -182,8 +164,7 @@ then
for client in $CFGCLIENTLIST
do
echo "Configuring the chef-client node $client on the chef-server $NODE."
#c_fullname="$client.$DOMAIN"
c_fullname=$client
c_fullname="$client.$DOMAIN"
knife client delete -y $c_fullname > /dev/null 2>&1
knife node delete -y $c_fullname > /dev/null 2>&1
@@ -207,18 +188,11 @@ then
exit 1
fi
roles=`echo $roles | sed -e 's/ /,/g'`
r="";
for onerole in $roles
do
r+="role[$onerole],"
done
# assign the role for the chef node
knife node run_list add $c_fullname "$r"
knife node run_list add $c_fullname "role[$roles]"
if [ $? != 0 ]
then
errmsg="Failed to run knife node run_list add $client '$r' on the chef-server $NODE."
errmsg="Failed to run knife node run_list add $client 'role[$roles]' on the chef-server $NODE."
logger -t xcat -p local4.err $errmsg
echo $errmsg
exit 1
@@ -245,76 +219,10 @@ then
done
IFS=$OIFS
fi
IFS=$OIFS
if [ $devmode -eq 0 ]
then
bags=(db_passwords secrets service_passwords user_passwords)
if [ ! -e "$REPOSITORY/databags" ]
then
mkdir -p "$REPOSITORY/databags"
fi
databag_key="$REPOSITORY/databags/openstack_databag_key"
openssl rand -base64 512 > $databag_key
if [ $? != 0 ]
then
errmsg="Failed to use openssl to generate the data bag key on $NODE. Please check whether openssl is installed."
logger -t xcat -p local4.err "$errmsg"
echo "$errmsg"
exit 1
fi
if [ ! -e "/etc/chef/" ]
then
mkdir -p "/etc/chef/"
fi
#for knife command
cp -f $databag_key "/etc/chef/encrypted_data_bag_secret"
#for other chef-client nodes
cp -f $databag_key "/etc/chef-server/encrypted_data_bag_secret"
# add the path of encrypted_data_bag_secret to knife.rb file
if ! grep -w -q 'encrypted_data_bag_secret' /root/.chef/knife.rb
then
echo "encrypted_data_bag_secret '/etc/chef/openstack_encrypted_data_bag_secret'" >> /root/.chef/knife.rb
fi
# delete the old databags
knife data bag list | xargs -i knife data bag delete -y {}
# create databags and upload items
for bag in ${bags[@]}
do
bagpath="$REPOSITORY/databags/$bag"
if [ ! -e "$bagpath" ]
then
errmsg="$bag doesn't exist in $REPOSITORY/databags. Please make sure the databags are in the directory $REPOSITORY/databags."
logger -t xcat -p local4.err "$errmsg"
echo "$errmsg"
exit 1
fi
knife data bag create --secret-file $databag_key $bag
items=$(ls $bagpath)
for item in $items
do
knife data bag from file $bag $REPOSITORY/databags/$bag/$item --secret-file $databag_key
if [ $? != 0 ]
then
errmsg="Failed to run knife data bag from file $bag $REPOSITORY/databags/$bag/$item --secret-file $databag_key"
logger -t xcat -p local4.err "$errmsg"
echo "$errmsg"
exit 1
fi
done
done
fi
exit 0
@@ -105,10 +105,7 @@ override_attributes(
"identity_service_chef_role" => "allinone-compute"
},
"block-storage" => {
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"keystone_service_chef_role" => "allinone-compute"
"keystone_service_chef_role" => "allinone-compute"
},
"dashboard" => {
"keystone_service_chef_role" => "allinone-compute",
@@ -132,7 +129,7 @@ override_attributes(
"openvswitch"=> {
"tenant_network_type"=>"vlan",
"network_vlan_ranges"=>"physnet1",
"bridge_mappings"=>"physnet1:br-#TABLE:clouds:name=$CLOUD:datainterface#"
"bridge_mappings"=>"physnet1:#TABLE:clouds:name=$CLOUD:datainterface#"
}
},
"compute" => {
@@ -1,164 +0,0 @@
#
# IBM(c) 2013 EPL license http://www.eclipse.org/legal/epl-v10.html
#
name "$CLOUD"
description "Grizzly allinone environment file."
override_attributes(
"mysql" => {
"server_root_password" => "cluster",
"server_debian_password" => "cluster",
"server_repl_password" => "cluster",
"allow_remote_root" => true,
"root_network_acl" => "%"
},
"openstack" => {
"developer_mode" => false,
"secret"=>{
"key_path"=>"/etc/chef/encrypted_data_bag_secret"
},
"db"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#",
"compute"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"identity"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"image"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"network"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"volume"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"dashboard"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"metering"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
}
},
"mq"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"identity"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#",
"db"=>{
"username"=>"keystone",
"password"=> "keystone"
}
},
"endpoints"=>{
"identity-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"identity-admin"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-ec2-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-ec2-admin"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-xvpvnc"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-novnc"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"network-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"image-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"image-registry"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"volume-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"metering-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
}
},
"image" => {
"api"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"registry"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"image_upload" => false,
"upload_images" => ["cirros"],
"upload_image" => {
"cirros" => "https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img"
},
"identity_service_chef_role" => "allinone-compute"
},
"block-storage" => {
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"keystone_service_chef_role" => "allinone-compute"
},
"dashboard" => {
"keystone_service_chef_role" => "allinone-compute",
"use_ssl" => "false"
},
"network" => {
"metadata"=>{
"nova_metadata_ip"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"api"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"rabbit_server_chef_role" => "allinone-compute",
"l3"=>{
"external_network_bridge_interface"=>"#TABLE:clouds:name=$CLOUD:pubinterface#"
},
"openvswitch"=> {
"tenant_network_type"=>"vlan",
"network_vlan_ranges"=>"physnet1",
"bridge_mappings"=>"physnet1:br-#TABLE:clouds:name=$CLOUD:datainterface#"
}
},
"compute" => {
"identity_service_chef_role" => "allinone-compute",
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"xvpvnc_proxy"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:pubinterface#"
},
"novnc_proxy"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:pubinterface#"
},
"network" => {
"service_type" => "quantum"
},
"config" => {
"ram_allocation_ratio" => 5.0
},
"libvirt" => {
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#",
"virt_type" => "#TABLE:clouds:name=$CLOUD:virttype#"
}
}
}
)
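The #TABLE:clouds:name=$CLOUD:column# directives throughout the file above are xCAT table substitutions: at deploy time each one is replaced with a column from the clouds table row whose name matches $CLOUD. The same lookup can be done by hand with gettab; cloud1 below is a placeholder cloud name:

# read individual columns from the clouds table row where name=cloud1
gettab name=cloud1 clouds.hostip
gettab name=cloud1 clouds.mgtinterface clouds.datainterface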
@@ -1,178 +0,0 @@
#
# IBM(c) 2013 EPL license http://www.eclipse.org/legal/epl-v10.html
#
#
# When using this template, be sure to change proxy-cidr and object-cidr
# to match your actual network environment.
#
name "$CLOUD"
description "Grizzly keystone+swift allinone environment file."
override_attributes(
"mysql" => {
"server_root_password" => "cluster",
"server_debian_password" => "cluster",
"server_repl_password" => "cluster",
"allow_remote_root" => true,
"root_network_acl" => "%"
},
"swift" => {
"authmode" => "keystone",
"authkey" => "swift",
"proxy_server_chef_role"=>"os-object-storage",
"network" => {
"proxy-bind-ip" => "#TABLE:clouds:name=$CLOUD:hostip#",
"proxy-cidr" => "11.0.0.0/8",
"account-bind-ip" => "#TABLE:clouds:name=$CLOUD:hostip#",
"container-bind-ip" => "#TABLE:clouds:name=$CLOUD:hostip#",
"object-bind-ip" => "#TABLE:clouds:name=$CLOUD:hostip#",
"object-cidr" => "11.0.0.0/8"
}
},
"openstack" => {
"developer_mode" => true,
"db"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#",
"compute"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"identity"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"image"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"network"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"volume"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"dashboard"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"metering"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
}
},
"mq"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"identity"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#",
"db"=>{
"username"=>"keystone",
"password"=> "keystone"
}
},
"endpoints"=>{
"identity-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"identity-admin"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-ec2-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-ec2-admin"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-xvpvnc"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-novnc"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"network-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"image-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"image-registry"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"volume-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"metering-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
}
},
"image" => {
"api"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"registry"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"image_upload" => false,
"upload_images" => ["cirros"],
"upload_image" => {
"cirros" => "https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img"
},
"identity_service_chef_role" => "allinone-compute"
},
"block-storage" => {
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"keystone_service_chef_role" => "allinone-compute"
},
"dashboard" => {
"keystone_service_chef_role" => "allinone-compute",
"use_ssl" => "false"
},
"network" => {
"metadata"=>{
"nova_metadata_ip"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"api"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"rabbit_server_chef_role" => "allinone-compute",
"l3"=>{
"external_network_bridge_interface"=>"#TABLE:clouds:name=$CLOUD:pubinterface#"
},
"openvswitch"=> {
"tenant_network_type"=>"vlan",
"network_vlan_ranges"=>"physnet1",
"bridge_mappings"=>"physnet1:br-#TABLE:clouds:name=$CLOUD:datainterface#"
}
},
"compute" => {
"identity_service_chef_role" => "allinone-compute",
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"xvpvnc_proxy"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:pubinterface#"
},
"novnc_proxy"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:pubinterface#"
},
"network" => {
"service_type" => "quantum"
},
"config" => {
"ram_allocation_ratio" => 5.0
},
"libvirt" => {
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#",
"virt_type" => "#TABLE:clouds:name=$CLOUD:virttype#"
}
}
}
)
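
Both environment templates above are removed outright by this comparison. If a copy is still needed, it can be recovered from the repository's history; the commit hash and file path below are placeholders, not values from this page:

# list files deleted in recent commits
git log --diff-filter=D --name-status --oneline
# write the last committed version of a deleted file back out
git show <deleting_commit>^:path/to/environment.rb > environment.rb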

Some files were not shown because too many files have changed in this diff.