Merge branch 'master' of ssh://git.code.sf.net/p/xcat/xcat-core

This commit is contained in:
root 2014-11-13 05:45:22 -08:00
commit 87fadccb22
9 changed files with 434 additions and 11 deletions

View File

@ -53,7 +53,7 @@ for i in $*; do
done
# Supported distributions
dists="maverick natty oneiric precise saucy trusty"
dists="saucy trusty utopic"
c_flag= # xcat-core (trunk-delvel) path
d_flag= # xcat-dep (trunk) path
@ -262,7 +262,7 @@ then
mkdir conf
for dist in $dists; do
if [ "$dist" = "trusty" ]; then
if [ "$dist" = "trusty" ] || [ "$dist" = "utopic" ]; then
tmp_out_arch="amd64 ppc64el"
else
tmp_out_arch="amd64"
@ -289,7 +289,7 @@ __EOF__
amd_files=`ls ../$package_dir_name/*.deb | grep -v "ppc64el"`
all_files=`ls ../$package_dir_name/*.deb`
for dist in $dists; do
if [ "$dist" = "trusty" ]; then
if [ "$dist" = "trusty" ] || [ "$dist" = "utopic" ]; then
deb_files=$all_files
else
deb_files=$amd_files
@ -382,7 +382,7 @@ then
#create the conf/distributions file
for dist in $dists; do
if [ "$dist" = "trusty" ]; then
if [ "$dist" = "trusty" ] || [ "$dist" = "utopic" ]; then
tmp_out_arch="amd64 ppc64el"
else
tmp_out_arch="amd64"
@ -409,7 +409,7 @@ __EOF__
amd_files=`ls ../debs/*.deb | grep -v "ppc64el"`
all_files=`ls ../debs/*.deb`
for dist in $dists; do
if [ "$dist" = "trusty" ]; then
if [ "$dist" = "trusty" ] || [ "$dist" = "utopic" ]; then
deb_files=$all_files
else
deb_files=$amd_files

View File

@ -8,6 +8,7 @@ Standards-Version: 3.9.2
Package: xcat-genesis-scripts
Architecture: amd64 ppc64el
Depends: xcat-genesis-base-amd64[any-amd64],xcat-genesis-base-ppc64[any-ppc64el]
Replaces: xcat-genesis-scripts-amd64[any-amd64]
Description: xCAT genesis
(Genesis Enhanced Netboot Environment for System Information and Servicing) is a small, embedded-like environment for xCAT's use in discovery and management actions when interaction with an OS is infeasible. This package represents the EPL content that is more tightly bound to specific xcat-core versions

View File

@ -10,7 +10,7 @@
#export DH_VERBOSE=1
ifneq ($(DEB_BUILD_ARCH),ppc64el)
ifneq ($(DEB_HOST_ARCH),ppc64el)
export TARGET_ARCH=x86_64
else
export TARGET_ARCH=ppc64

View File

@ -144,7 +144,7 @@ deb http://91.189.88.140/ubuntu-ports/ trusty-updates universe" >> /etc/apt/sour
uname -m |grep ppc64 >/dev/null
then
echo "ARCH=powerpc perl -x mlnxofedinstall $mlnxofed_options"
ARCH=powerpc perl -x mlnxofedinstall $mlnxofed_options
ARCH=powerpc perl -x mlnxofedinstall --without-fw-update $mlnxofed_options
sleep 1
service openibd restart
else

View File

@ -27,6 +27,11 @@ if (!GetOptions(
if ($::HELP) { print $::USAGE; exit 0; }
unless (-x "/usr/sbin/tcpdump") {
print "Error: Please install tcpdump before the detecting.\n";
exit 1;
}
my $nic;
if ($::IF) {
$nic = $::IF;
@ -69,8 +74,12 @@ if (-f "/etc/redhat-release") {
$os = "rh";
} elsif (-f "/etc/SuSE-release") {
$os = "sles";
} elsif (-f "/etc/lsb-release") {
$os = "ubuntu";
} elsif (-f "/etc/debian_version") {
$os = "debian";
} else {
print "Only support the redhat and sles OS.\n";
print "Only support the redhat, sles, ubuntu and debian OS.\n";
exit 1;
}
# fork a process to capture the packet by tcpdump
@ -97,6 +106,15 @@ my $sock = IO::Socket::INET->new(Proto => 'udp',
LocalPort => '68',
PeerAddr => inet_ntoa(INADDR_BROADCAST));
# try the any port if localport 68 has been used
unless ($sock) {
$sock = IO::Socket::INET->new(Proto => 'udp',
Broadcast => 1,
PeerPort => '67',
LocalAddr => $IP,
PeerAddr => inet_ntoa(INADDR_BROADCAST));
}
unless ($sock) {
print "Create socket error: $@\n";
kill_child();

View File

@ -7,6 +7,6 @@ Standards-Version: 3.7.2
Package: xcat
Architecture: amd64 ppc64el
Depends: ${perl:Depends}, xcat-server, xcat-client, libdbd-sqlite3-perl, isc-dhcp-server, apache2, nfs-kernel-server, nmap, bind9, libxml-parser-perl, xinetd, tftpd-hpa, tftp-hpa, conserver-xcat, libnet-telnet-perl, ipmitool-xcat (>=1.8.9), syslinux[any-amd64], libsys-virt-perl, syslinux-xcat[any-amd64], xnba-undi[any-amd64], xcat-genesis-scripts, elilo-xcat[any-amd64], xcat-buildkit
Depends: ${perl:Depends}, xcat-server, xcat-client, libdbd-sqlite3-perl, isc-dhcp-server, apache2, nfs-kernel-server, nmap, bind9, libxml-parser-perl, xinetd, tftpd-hpa, tftp-hpa, conserver-xcat, libnet-telnet-perl, ipmitool-xcat (>=1.8.9), syslinux[any-amd64], libsys-virt-perl, syslinux-xcat[any-amd64], xnba-undi, xcat-genesis-scripts, elilo-xcat, xcat-buildkit
Description: Server and configuration utilities of the xCAT management project
xcat-server provides the core server and configuration management components of xCAT. This package should be installed on your management server

View File

@ -604,10 +604,10 @@ then
do
ifdown $tmp > /dev/null 2>&1
sleep 2
ifup $tmp
ifup $tmp > /dev/null 2>&1
done
else
ifup $nic
ifup $nic > /dev/null 2>&1
fi
fi
done

285
xCAT/postscripts/install_lsf Executable file
View File

@ -0,0 +1,285 @@
#!/bin/bash
#README################################################################
# (1)Check you have LSF installer script package and LSF distribution packages. e.g. "lsf9.1.3_lsfinstall.tar.Z" and "lsf9.1.3_lnx26-lib23-ppc64le.tar.Z"
# (2)Get LSF entitlement file for the edition you are installing. e.g. "platform_lsf_std_entitlement.dat"
# (3)Prepare a install.config file, install.config should be in the same directory with install_lsf and lsf_startup scripts.
# The format of install.config file
# cat install.config
# LSF_TOP=""
# LSF_ADMINS=""
# LSF_CLUSTER_NAME=""
# LSF_MASTER_LIST=""
# LSF_ENTITLEMENT_FILE="NEED A FULL PATH OF THE FILE"
# LSF_TARDIR=""
# (4)Run this script on one compute node, you can also use "updatenode <nodename> -P install_lsf" to execute this script on one compute node
# (It's not necessary to run it on each compute node).
#
# NOTE for install.config:
# -----------------
# LSF_TOP="/usr/share/lsf"
# -----------------
# Full path to the top-level installation directory {REQUIRED}
#
# The path to LSF_TOP must be shared and accessible to all hosts
# in the cluster. It cannot be the root directory (/).
# The file system containing LSF_TOP must have enough disk space for
# all host types (approximately 300 MB per host type).
# -----------------
# LSF_ADMINS="lsfadmin user1 user2"
# -----------------
# List of LSF administrators {REQUIRED}
#
# The first user account name in the list is the primary LSF
# administrator. It cannot be the root user account.
# Typically, this account is named lsfadmin.
# It owns the LSF configuration files and log files for job events.
# It also has permission to reconfigure LSF and to control batch
# jobs submitted by other users. It typically does not have
# authority to start LSF daemons. Usually, only root has
# permission to start LSF daemons.
# All the LSF administrator accounts must exist on all hosts in the
# cluster before you install LSF.
# Secondary LSF administrators are optional.
#
# -----------------
# LSF_CLUSTER_NAME="cluster1"
# -----------------
# Name of the LSF cluster {REQUIRED}
#
# It must be 39 characters or less, and cannot contain any
# white spaces. Do not use the name of any host, user, or user group
# as the name of your cluster.
#
# -----------------
# LSF_MASTER_LIST="hostm hosta hostc"
# -----------------
# List of LSF server hosts to be master or master candidate in the
# cluster {REQUIRED when you install for the first time or during
# upgrade if the parameter does not already exist.}
#
# You must specify at least one valid server host to start the
# cluster. The first host listed is the LSF master host.
#
# -----------------
# LSF_ENTITLEMENT_FILE="/usr/share/lsf/lsf_distrib/platform_lsf_std_entitlement.dat"
# -----------------
# You must specify a full path to the LSF entitlement file.
#
# -----------------
# LSF_TARDIR="/usr/share/lsf_distrib/"
# -----------------
# Full path to the directory containing the LSF distribution tar files.
#
# Default: Parent directory of the current working directory.
# For example, if lsfinstall is running under
# /usr/share/lsf_distrib/lsf_lsfinstall
# the LSF_TARDIR default value is
# /usr/share/lsf_distrib
# -----------------
# LSF_ADD_SERVERS="hostm hosta hostb hostc"
# -----------------
# List of additional LSF server hosts
#
# The hosts in LSF_MASTER_LIST are always LSF servers. You can specify
# additional server hosts.
#README################################################################
# Path of the silent-install answer file; it must live in the current
# working directory (the same directory as this script and lsf_startup).
INSTALL_CONFIG_FILE=`pwd`/install.config
# Optional overrides; when unset they are discovered/derived below.
#LSF_INSTALL_FILE_PATH=""
#LSF_GLIBC_FILE_PATH=""
#---------------------------------------------------------------------
# is_parameter_set PARA
#
# Ensure that parameter PARA appears in $INSTALL_CONFIG_FILE; if it is
# missing, append a recommended default:
#   LSF_SILENT_INSTALL_TARLIST -> all
#   LSF_DYNAMIC_HOST_WAIT_TIME -> 60
#   anything else              -> Y
# Always returns 0.
#---------------------------------------------------------------------
function is_parameter_set()
{
    PARA=$1
    # BUGFIX: anchor the lookup to "^PARA=".  The old unanchored grep
    # matched substrings, so e.g. an existing LSF_SILENT_INSTALL_TARLIST
    # line made SILENT_INSTALL look already configured and its default
    # was silently skipped.
    IF_SET=`grep "^$PARA=" $INSTALL_CONFIG_FILE`
    if [[ -z $IF_SET ]] ; then
        if [[ $PARA == "LSF_SILENT_INSTALL_TARLIST" ]]; then
            echo "$PARA="all"" >> $INSTALL_CONFIG_FILE
        elif [[ $PARA == "LSF_DYNAMIC_HOST_WAIT_TIME" ]]; then
            echo "$PARA="60"" >> $INSTALL_CONFIG_FILE
        else
            echo "$PARA="Y"" >> $INSTALL_CONFIG_FILE
        fi
        echo "INFO: Set a recommended value for $PARA"
    fi
    return 0
}
#verify if install.config exists
if [[ ! -f ${INSTALL_CONFIG_FILE} ]]; then
echo "ERROR: $INSTALL_CONFIG_FILE not found"
exit 1
fi
# Load the answer file into this shell and echo it for the node log.
. $INSTALL_CONFIG_FILE
cat $INSTALL_CONFIG_FILE
#verify if the required parameters are valid in install.config
if [[ ! -d $LSF_TOP ]]; then
echo "ERROR: No such directory $LSF_TOP. Check LSF_TOP in install.config"
exit 1
fi
if [[ x$LSF_ADMINS == x ]]; then
echo "ERROR: You must specify LSF_ADMINS in install.config"
exit 1
fi
# Every listed administrator account must already exist on this node.
# The id(1) output is captured only to keep it off the console; the
# immediately following RTC=$? overwrites it with the exit status,
# which is the value actually tested.
for USERID in ${LSF_ADMINS} ; do
RTC=`id $USERID`
RTC=$?
if [[ $RTC -ne 0 ]] ; then
echo "ERROR: No such user id $USERID on the node. Check LSF_ADMINS in install.config"
exit 1
fi
done
if [[ x$LSF_CLUSTER_NAME == x ]]; then
echo "ERROR: You must specify LSF_CLUSTER_NAME in install.config"
exit 1
fi
if [[ x$LSF_MASTER_LIST == x ]]; then
echo "ERROR: You must specify LSF_MASTER_LIST in install.config"
exit 1
fi
# Probe each master candidate over ssh; requires passwordless ssh from
# this node (standard in an xCAT-managed cluster).
for MASTER_NODE in $LSF_MASTER_LIST ; do
RTC=`ssh $MASTER_NODE uptime`
RTC=$?
if [[ $RTC -ne 0 ]] ; then
echo "ERROR: MASTER_NODE $MASTER_NODE is not reachable. Check LSF_MASTER_LIST in install.config"
exit 1
fi
done
# The entitlement file must be a readable regular file (full path).
if [[ ! -r ${LSF_ENTITLEMENT_FILE} || -d ${LSF_ENTITLEMENT_FILE} ]]; then
echo "ERROR: No such entitlement file $LSF_ENTITLEMENT_FILE. Check LSF_ENTITLEMENT_FILE in install.config"
exit 1
fi
#CH_TMP=`expr "$LSF_ENTITLEMENT_FILE" : '\(.\).*'`
#if [[ "$CH_TMP" != "/" ]]; then
# echo "Set LSF_ENTITLEMENT_FILE a full path but not relative path"
# return 1
#fi
if [[ ! -d $LSF_TARDIR ]]; then
echo "ERROR: No such directory $LSF_TARDIR. Check LSF_TARDIR in install.config"
exit 1
fi
# Locate the lsfinstall tarball: either the user pinned it with
# LSF_INSTALL_FILE_PATH, or exactly one lsf*lsfinstall*tar.Z must be
# found under LSF_TARDIR.
if [[ x$LSF_INSTALL_FILE_PATH != x ]]; then
if [[ -r ${LSF_INSTALL_FILE_PATH} ]] && [[ ! -d ${LSF_INSTALL_FILE_PATH} ]]; then
:
else
echo "ERROR: No such lsf install tar file $LSF_INSTALL_FILE_PATH. Check LSF_INSTALL_FILE_PATH in install.config"
exit 1
fi
else
LSF_INSTALL_FILE_PATH=`find $LSF_TARDIR -name 'lsf*lsfinstall*tar.Z'`
count=`echo $LSF_INSTALL_FILE_PATH |wc -w`
if [[ $count -gt 1 ]]; then
echo "ERROR: There are more than one lsfinstall tar file. You need to specify LSF_INSTALL_FILE_PATH in install.config or remove other useless lsfinstall TAR files in LSF_TARDIR $LSF_TARDIR."
exit 1
elif [[ $count -eq 1 ]]; then
:
else
echo "ERROR: lsfinstall TAR file not found in LSF_TARDIR $LSF_TARDIR."
exit 1
fi
fi
echo "INFO: We will untar the lsfinstall TAR file $LSF_INSTALL_FILE_PATH."
# Additional (non-master) server hosts are optional; when the list is
# empty this loop body never runs.
for SERVER_NODE in $LSF_ADD_SERVERS ; do
RTC=`ssh $SERVER_NODE uptime`
RTC=$?
if [[ $RTC -ne 0 ]] ; then
echo "ERROR: SERVER_NODE $SERVER_NODE is not reachable. Check LSF_ADD_SERVERS in install.config"
exit 1
fi
done
#Check if we set following parameters in install.config; if not, set them a recommended value.
#ENABLE_DYNAMIC_HOSTS="Y"
##LSF_DYNAMIC_HOST_WAIT_TIME="60"
#ENABLE_HPC_CONFIG="Y"
#SILENT_INSTALL="Y"
#LSF_SILENT_INSTALL_TARLIST="all"
is_parameter_set "ENABLE_DYNAMIC_HOSTS"
is_parameter_set "LSF_DYNAMIC_HOST_WAIT_TIME"
is_parameter_set "ENABLE_HPC_CONFIG"
is_parameter_set "SILENT_INSTALL"
is_parameter_set "LSF_SILENT_INSTALL_TARLIST"
#Extract lsfinstall package
cd $LSF_TARDIR
LSF_INSTALL_PACKAGE=`basename $LSF_INSTALL_FILE_PATH`
# NOTE: with a pipeline, $? reflects tar's status, not zcat's.
zcat $LSF_INSTALL_PACKAGE | tar xvf -
RTC=$?
if [[ $RTC -ne 0 ]] ; then
echo "ERROR: Fail to extract LSF_INSTALL_PACKAGE $LSF_INSTALL_PACKAGE."
exit 1
fi
#INSTALL LSF on the node
# The tarball unpacks into lsf<version>_lsfinstall/ under LSF_TARDIR.
cd $LSF_TARDIR/lsf*lsfinstall
INSTALL_LOG="Install.log"
#backup Install.log if there is one before installing
if [[ -r $INSTALL_LOG ]]; then
mv $INSTALL_LOG `date "+%Y.%m.%d-%H:%M"`_$INSTALL_LOG
fi
./lsfinstall -f $INSTALL_CONFIG_FILE
RTC=$?
# lsfinstall can exit 0 yet fail; also require its success marker in the log.
IF_INSTALL_DONE=`grep "lsfinstall is done" $INSTALL_LOG`
if [[ $RTC -ne 0 || -z $IF_INSTALL_DONE ]] ; then
echo "ERROR: Fail to install LSF. Check Install.log and Install.err in `pwd`."
exit 1
fi
echo "INFO: Installation script DONE."
# Extract the version digits from the installer package name,
# e.g. "lsf9.1.3_lsfinstall.tar.Z" -> characters 4-6 -> "9.1".
LSF_VERSION=`echo $LSF_INSTALL_PACKAGE |cut -c4-6`
# The extra lsf.conf / lsb.hosts tuning below only applies to LSF >= 9.1.
if [[ `echo "$LSF_VERSION >= 9.1"|bc` -eq 1 ]]
then
    #Start configuration. Update configuration files lsf.conf,lsb.hosts.
    echo "INFO: Updating LSF Cluster Configuration Files lsf.conf and lsb.hosts"
    LSF_CONF_FILE="$LSF_TOP/conf/lsf.conf"
    echo "LSF_RSH=ssh" >> $LSF_CONF_FILE
    echo "LSF_PE_NETWORK_NUM=2" >> $LSF_CONF_FILE
    echo "LSF_PE_NETWORK_UPDATE_INTERVAL=6" >> $LSF_CONF_FILE
    echo "EGO_DEFINE_NCPUS=threads" >> $LSF_CONF_FILE
    echo "LSF_HPC_EXTENSIONS="CUMULATIVE_RUSAGE"">> $LSF_CONF_FILE
    LSB_HOSTS_FILE="$LSF_TOP/conf/lsbatch/$LSF_CLUSTER_NAME/configdir/lsb.hosts"
    LSB_HOSTS_FILE_ORIG="$LSF_TOP/conf/lsbatch/$LSF_CLUSTER_NAME/configdir/lsb.hosts.orig"
    # Keep the installer-generated lsb.hosts for reference, then write a
    # fresh one that enables affinity scheduling and keeps the primary
    # master (MXJ 0) from running jobs.
    mv $LSB_HOSTS_FILE $LSB_HOSTS_FILE_ORIG
    PRIMARY_MASTER_NODE=`echo $LSF_MASTER_LIST | awk '{print $1}'`
    # BUGFIX: the original redirected "Begin Host" to a relative
    # ./lsb.hosts, so $LSB_HOSTS_FILE was missing its opening section
    # marker and was unparsable by LSF.
    echo "Begin Host" >> $LSB_HOSTS_FILE
    echo "HOST_NAME MXJ r1m pg ls tmp DISPATCH_WINDOW AFFINITY" >> $LSB_HOSTS_FILE
    echo "default ! () () () () () (Y)" >> $LSB_HOSTS_FILE
    echo "$PRIMARY_MASTER_NODE 0 () () () () () (Y)" >> $LSB_HOSTS_FILE
    echo "End Host" >> $LSB_HOSTS_FILE
    echo "Begin HostGroup" >> $LSB_HOSTS_FILE
    echo "GROUP_NAME GROUP_MEMBER" >> $LSB_HOSTS_FILE
    echo "End HostGroup" >> $LSB_HOSTS_FILE
fi

View File

@ -0,0 +1,119 @@
#!/bin/bash
#README################################################################
# (1)lsf_startup should be run after lsf is installed, so it should be run after the install_lsf script
# (2)lsf_startup use the same install.config file with install_lsf, install.config should be in the same directory with install_lsf and lsf_startup scripts.
# The format of install.config file, see more details in install_lsf README
# cat install.config
# LSF_TOP=""
# LSF_ADMINS=""
# LSF_CLUSTER_NAME=""
# LSF_MASTER_LIST=""
# LSF_ENTITLEMENT_FILE="NEED A FULL PATH OF THE FILE"
# LSF_TARDIR=""
# (3)Run this script on all lsf cluster nodes, you can also use "updatenode <noderange> -P lsf_startup" to execute this script
#
#README################################################################
#need install.config
# Load the same answer file install_lsf consumed; must be in the cwd.
INSTALL_CONFIG_FILE=`pwd`/install.config
. $INSTALL_CONFIG_FILE
echo "INFO: Run hostsetup on each node."
# BUGFIX: find(1) exits 0 even when nothing matches, so the old
# "find >/dev/null; [[ $? -ne 0 ]]" test could never detect a missing
# hostsetup.  Check whether anything was actually found instead.
HOSTSETUP_PATH=`find "$LSF_TOP" -name hostsetup | head -1`
if [[ -z $HOSTSETUP_PATH ]]
then
echo "Error : there is no hostsetup, check if lsf install is installed or not."
exit 1
fi
#get lsf main version
# hostsetup is installed at $LSF_TOP/<version>/install/hostsetup, so the
# version is the grandparent directory name of the script we just found.
# (The old awk '{print $5}' split on whitespace and produced an empty
# string for any path without spaces.)
LSF_VERSION=$(basename $(dirname $(dirname $HOSTSETUP_PATH)))
if [[ x${LSF_ADD_SERVERS} != x ]]; then
ALL_LSF_NODES=${LSF_MASTER_LIST}' '${LSF_ADD_SERVERS}
else
ALL_LSF_NODES=${LSF_MASTER_LIST}
fi
# Run hostsetup only when this node is part of the cluster.
# NOTE(review): $NODE is presumably exported by xCAT updatenode — verify.
for item in $ALL_LSF_NODES
do
if [[ x${item} == x$NODE ]]
then
$LSF_TOP/$LSF_VERSION/install/hostsetup --top="$LSF_TOP" --boot="y"
fi
done
# Set your LSF environment"
echo "INFO: Set LSF environment for root and LSF_ADMINS"
# Make root source profile.lsf at login, on this node only.
for lsfnode in $ALL_LSF_NODES
do
if [[ x${lsfnode} == x$NODE ]]
then
# Drop any stale entry first so repeated runs do not stack duplicate
# lines (same dedup the admin users get below).
if [[ -f /root/.profile ]]
then
sed -i '/profile.lsf/d' /root/.profile
fi
echo ". $LSF_TOP/conf/profile.lsf" >> /root/.profile
fi
done
#change .profile for every lsf admin user
for LSF_ADMIN_USER in $LSF_ADMINS ; do
# BUGFIX: anchor the passwd lookup to the login-name field and take only
# the first match; the old unanchored grep could match the user name
# inside another account's name or GECOS field and yield a wrong (or
# multi-line) home directory.
LSF_ADMIN_USER_HOMEDIR=`grep "^$LSF_ADMIN_USER:" /etc/passwd | head -1 | cut -d ':' -f 6`
grep "profile.lsf" $LSF_ADMIN_USER_HOMEDIR/.profile > /dev/null
if [[ $? -eq 0 ]]
then
sed -i '/profile.lsf/d' $LSF_ADMIN_USER_HOMEDIR/.profile
fi
for lsfnode in $ALL_LSF_NODES
do
if [[ x${lsfnode} == x$NODE ]]
then
echo ". $LSF_TOP/conf/profile.lsf" >> $LSF_ADMIN_USER_HOMEDIR/.profile
fi
done
done
# Startup LSF CLUSTER
echo "INFO: Start LSF Cluster."
. $LSF_TOP/conf/profile.lsf
# BUGFIX: the original assigned the literal string "lsadmin", so the
# emptiness check below was dead code and a missing binary surfaced only
# as "command not found".  Resolve through PATH instead (profile.lsf was
# sourced above, so the LSF bin directory is on PATH when installed).
lsadminpath=`command -v lsadmin`
if [[ x${lsadminpath} == x ]]
then
echo "Error:there is no lsadmin."
else
# Start the LIM and RES daemons on this host; report but do not abort
# on failure so the batch daemon below still gets a chance to start.
$lsadminpath limstartup
if [[ $? -ne 0 ]]
then
echo "lsadmin limstartup fail."
fi
$lsadminpath resstartup
if [[ $? -ne 0 ]]
then
echo "lsadmin resstartup fail."
fi
fi
# Same dead-check fix for badmin.
badminpath=`command -v badmin`
if [[ x${badminpath} == x ]]
then
echo "Error:there is no badmin."
else
# Start the sbatchd batch daemon on this host.
$badminpath hstartup
if [[ $? -ne 0 ]]
then
echo "Error : badmin hstartup failed. "
fi
fi