198 Commits

Author SHA1 Message Date
2209e419cf update Template.pm, so that the partitionfile is grabbed from the master using wget 2014-02-19 00:26:23 +00:00
lissav
d5e7c46622 more zone support 2014-02-18 09:06:11 -05:00
lissav
ace58552c1 more zone support 2014-02-18 08:49:47 -05:00
lissav
62c1f4a6f6 fix man page 2014-02-18 06:20:28 -05:00
6e1720038e change comment on path of where the gpfs_updates directory is placed 2014-02-17 12:23:32 +00:00
lissav
95486591e4 link in zone commands 2014-02-13 14:27:41 -05:00
Jarrod Johnson
b1fddf8eca Merge branch '2.8' of ssh://git.code.sf.net/p/xcat/xcat-core into 2.8 2014-02-13 09:53:54 -05:00
Jarrod Johnson
d03dcbe5f3 Explicitly set SSL_VERIFY_MODE during start ssl in Client 2014-02-13 09:51:02 -05:00
lissav
2ee32a79c8 Multiple zone support 2014-02-13 07:54:10 -05:00
lissav
7fe699f561 Multiple zone support 2014-02-13 07:50:30 -05:00
lissav
445483f25a put in wrong directory 2014-02-13 07:48:02 -05:00
lissav
aa32357398 Multiple zone support 2014-02-13 07:43:36 -05:00
zhaoertao
601dc2569c update manpage of mkdef/chdef for osimage 2014-02-13 00:59:42 -08:00
jjhua
e0eea42b96 only use short node name in loadclouddata 2014-02-13 12:28:34 -05:00
immarvin
663e7d0132 take *.rhel*.pkglist as pkglist file if *.rhels*.pkglist do not exist 2014-02-12 00:52:35 -08:00
immarvin
3dce4de63a specify text installation mode, otherwise, anaconda will drop into choice dialog when fails to start X in graphic mode 2014-02-12 00:49:36 -08:00
zhaoertao
a4a0ed5da3 fix bug 3983: copycds show error info 2014-02-11 22:06:48 -08:00
jjhua
2876b0f371 The template for keystone and swift(all in one) 2014-02-12 12:34:27 -05:00
yinle
3fd9ee8ad9 fix bug #3984 Ubuntu 13.10 diskless installation fails 2014-02-11 10:54:53 -08:00
jjhua
2d7b3f6c51 support for keystone+swift 2014-02-11 15:44:44 -05:00
Jarrod Johnson
73b9ef07ad Have openssl req use better message digest 2014-02-11 10:08:06 -05:00
daniceexi
784f89b916 roll back the last change in configmic file; And fix the issue that hostname cannot show the short hostname 2014-02-11 04:48:40 -05:00
Jarrod Johnson
8804d7180b RHEL7 support for diskful compute profile complete 2014-02-10 14:01:05 -05:00
Jarrod Johnson
89f3dccdbe Have RHEL7 proceed to get through install completion
Still need to get through the postscript phase
2014-02-10 14:00:52 -05:00
Jarrod Johnson
263ee3af3a Add signature detection for RHEL7 media to anaconda 2014-02-10 10:50:32 -05:00
daniceexi
5452efea2e fix the issue that hostname command cannot get short hort name 2014-02-10 08:24:41 -05:00
Jarrod Johnson
504dc16571 Fix xCAT init script status reporting
xCAT in some cases was reporting improper status for certain scenarios.
Risk being inaccure if no pid file exists so that it is accurate when it
does exist.
2014-02-07 17:39:32 -05:00
lissav
1d64d9ce82 new zone table and zonename attribute 2014-02-05 08:28:53 -05:00
Jarrod Johnson
b808b12dcf Correct } mistake in previous commit to IPMI.pm 2014-02-04 10:26:57 -05:00
7f42f33094 fix unordered cherry-pick 2014-01-30 20:42:02 +00:00
e889f59523 add nichostnameprefixes to @nodeattrs 2014-01-30 20:40:17 +00:00
afb15362f3 first commit for prefix hostname feature 2014-01-30 20:40:07 +00:00
b832cb5023 fix bug 3971, trim othernames variable 2014-01-30 19:54:07 +00:00
Jarrod Johnson
5e62fbced8 _clear_cache was removed long ago. _build_cache does all the needed work itself, so skip _build_cache. 2014-01-30 13:00:42 -05:00
ligc
5ac1ebaa63 fix for a typo in confignics, caused ib configuration problems 2014-01-29 10:03:17 -06:00
lissav
0ba8649e56 Add IBM_HPC_Stack_in_an_xCAT_Cluster 2014-01-28 15:35:47 -05:00
yinle
9cae05441d minor fix 2014-01-28 13:47:49 -08:00
yinle
5e686b1a45 Add range check and message to nmap 2014-01-28 13:38:39 -08:00
baiyuan
b7ec6919e9 fix Undefined subroutine &JSON::decode_json called at /opt/xcat/ws/xcatws.cgi line 168 2014-01-27 23:31:39 -05:00
zhaoertao
6556f026d7 modify usage for 'chvm' and 'mkvm', remove debug msg 2014-01-26 22:53:39 -08:00
zhaoertao
d662c5e8b5 The scripts used for configuring and provisioning VIOS partition 2014-01-26 22:02:50 -08:00
zhaoertao
23423a5cb1 create VIOS and logical partitions 2014-01-26 21:57:57 -08:00
qyin
4a2de6ddc2 windows solution 222013 xcat part 2014-01-27 13:12:27 +08:00
ligc
0a810645f2 fix for bug 3979: print a message with rnetboot/bootseq when gateway is empty 2014-01-27 09:58:48 -06:00
lissav
1e2fdb0529 improve man for sshbetweennodes 2014-01-23 12:59:11 -05:00
zhaoertao
e62b5b43a8 3974 rinv failed for Fujitsu Blade Server 2014-01-22 23:19:19 -08:00
baiyuan
caceaab306 upload testcase for ubuntu 2014-01-22 23:48:03 -05:00
Jarrod Johnson
d08d6fb244 Add enic to nics in genesis 2014-01-22 16:57:43 -05:00
baiyuan
0a7963cc65 fix 3879,complete kit size is too big,just keep build_input in complete kit with buildkit.conf,other_files,plugins and scripts, remove other useless dir 2014-01-21 03:48:06 -05:00
yinle
07f95665be Add a new file xcat.conf.apach24 2014-01-20 13:51:04 -08:00
yinle
9b171c32af fix bug #3973 Ubuntu 13.10 diskful installation fails 2014-01-20 13:49:15 -08:00
yinle
bc607bf74c fix bug #3973 Ubuntu 13.10 diskful installation fails 2014-01-20 13:21:03 -08:00
yinle
c98217e5a1 fix bug #3973 Ubuntu 13.10 diskful installation fails 2014-01-20 13:16:57 -08:00
daniceexi
20865b8812 Change the man page of nodeset to make it supports the shutdown and shell operations 2014-01-20 11:09:09 -05:00
baiyuan
91a5abd7f3 fix 3879,remove build_input dir from complete kit xxx.tar.gz 2014-01-20 04:10:03 -05:00
baiyuan
7c8f671f9f kitconponent should be kitcomponent 2014-01-19 22:19:07 -05:00
baiyuan
95d332aa23 update imgexport/imgimport manpage to surpport kits 2014-01-19 21:54:04 -05:00
baiyuan
ece1fd7d03 fixed 3357,add symlink,copy postscripts and plugin files for kit 2014-01-16 20:54:14 -05:00
yinle
0f93e7a135 Add a new file xcat.conf.apach24 2014-01-16 12:45:19 -08:00
yinle
d97a9e4316 minor fix 2014-01-16 04:41:33 -08:00
yinle
aa2c4978c5 update building of ubuntu repo to include the supported ubuntu disctros 2014-01-16 04:38:31 -08:00
yinle
65bd3afbb3 Merge branch '2.8' of ssh://git.code.sf.net/p//xcat/xcat-core into 2.8 2014-01-16 04:21:24 -08:00
yinle
3b418f8981 update building of ubuntu repo to include the supported ubuntu disctros 2014-01-16 03:56:55 -08:00
daniceexi
d0e3c2354f defect 3968: fixed the issue that for statelite on aix, the .statelite dir was not copied to shared_root spot from default spot 2014-01-16 04:48:20 -05:00
lissav
bcb98fcec9 fix mysqlsetup -u 2014-01-15 06:14:52 -05:00
immarvin
27f6761231 fix defect #3960 Genimage broken for CentOS 5.4 nodes 2014-01-14 01:00:33 -08:00
Jarrod Johnson
6777f86870 Make IPMI 2.0 crypto dependencies mandatory
Faced with an increasing population of IPMI 2 only devices, make the AES/CBC
requirements mandatory as it is a common source of systems failing to work
now.
2014-01-13 10:50:00 -05:00
daniceexi
caaa130479 To make bmcsetup cmd to update node status to be [bmcready] in genesis; And make chain mechanism to support [shutdown] key word which is used to poweroff the node 2014-01-13 07:54:05 -05:00
daniceexi
a3ade8608b fix the issue that proxydhcp configuration file cannot be updated 2014-01-10 09:15:50 -05:00
sjing
487c57d358 Fix for bug 3955. 2014-01-10 03:01:56 -05:00
sjing
9c35744fd7 Fix for bug 3815
Last fix results in "makedhcp -n" not working, so re-fix it.
2014-01-09 22:14:55 -05:00
lissav
4263c661c5 update description 2014-01-09 08:46:26 -05:00
Jarrod Johnson
3db9440ca9 Fix detection of debian for some ubuntu installations 2014-01-08 15:16:18 -05:00
lissav
1caaf17eb2 Merge branch '2.8' of ssh://git.code.sf.net/p/xcat/xcat-core into 2.8 2014-01-08 11:11:44 -05:00
lissav
a7bb1d0a9a simplify messages for odbcsetup call 2014-01-08 11:11:17 -05:00
ligc
7f7907ae3e fix for bug 3951: remove the code to check xcat versions during xcatd restart/reload 2014-01-08 23:49:01 +08:00
lissav
a37d0e3ec4 fix for defect 3839 2014-01-08 10:32:37 -05:00
ligc
f35332a0d1 fix for bug 3947: add check for AIX and nmap existence 2014-01-08 23:29:30 +08:00
wanghuaz
c1359797e8 handle kit staff in imgexport/imgimport 2014-01-08 20:14:47 +08:00
daniceexi
05a1624439 defect 3135: changed the mount process for the statelite directory (for persistent entries) that make a directory after the node name and remount to the nodename directory instead of nfs root dir for persistent 2014-01-08 04:59:33 -05:00
lissav
9470ac6d1a Add catagories to the site table 2014-01-07 15:40:58 -05:00
baiyuan
509b979ee8 fix:remoed should be removed 2014-01-07 03:06:30 -05:00
daniceexi
a03b712e2f make confignics postscript to accept the site.setinstallnic to configure installnic to be static 2014-01-07 02:26:53 -05:00
lissav
a46a8919a3 defect 3948 2014-01-06 14:56:58 -05:00
wanghuaz
8ca9e350e4 fixing the migration problem that rmkitcomp should remove the kitdeployment parameter file and its contents. 2014-01-06 15:22:11 +08:00
immarvin
3e31714aa3 fedora19/fedora20 diskful support 2014-01-02 23:50:47 -08:00
baiyuan
24b255fec5 update cases0 for buildkit 2014-01-02 02:50:08 -05:00
ligc
37be3cd9d7 do not use nodels --version in /etc/init.d/xcatd 2014-01-02 14:38:09 +08:00
sjing
f11aa5cc6d Fix for bug 3952
Made makedhcp be able to handle the case where site.nameservers or
networks.nameservers is a comma delimited list with <xcatmaster>
keyword in it.
2014-01-02 01:17:44 -05:00
daniceexi
49cd33382c defect 3909: make xcatd loads xCAT::Enabletrace by require instead of use to save resource 2014-01-02 02:02:24 -05:00
daniceexi
6d76a44409 fix man page of mknb to indicate that mknb only supports x86_64 2014-01-02 01:12:19 -05:00
jjhua
0fe0d25c5f change the name os-object-storage-setup in os-object-storage-setup.rb 2014-01-01 22:09:20 -05:00
daniceexi
2facd1f0c4 Change the Windows deployment templates to support disk configuration, nics configuration and run postscripts 2013-12-31 09:01:28 -05:00
daniceexi
1039ba5490 Enhance genimage.cmd to accept second and third params for multiple winpe support 2013-12-31 08:02:54 -05:00
wanghuaz
b4a450352c add an enhancement to skip not well-formed deployment parameters. 2013-12-31 17:02:22 +08:00
wanghuaz
341e847646 minor change in last check of passing parameters to postinstall and postbootscritps 2013-12-31 15:37:53 +08:00
daniceexi
740114c491 Add some comments and help message for genimage.bat 2013-12-30 23:02:52 -05:00
daniceexi
0d1e50c764 fixed the issue that missed the winpe-scripting.cab in last checkin 2013-12-30 07:01:40 -05:00
wanghuaz
9403878ade fixing bug 3815: don't use global variables which doest work well in hierarchy system. 2013-12-30 10:56:57 +08:00
wanghuaz
2e1d048dee passing kitcomponent deploy parameters to genimage package installation, postinstall script and postbootscripts. 2013-12-30 10:05:19 +08:00
wanghuaz
c327c4a24b fixng bug 3945: give an example of how to write ospkgdeps and kitpkgdeps in different arch. 2013-12-27 17:05:28 +08:00
baiyuan
c3132f5da9 kit release should be mandatory according to mini-design 2013-12-27 02:42:46 -05:00
wanghuaz
b93a7b7ab1 fixing bug 3943: give accureate pattern to match output from console 2013-12-27 15:30:00 +08:00
baiyuan
2ad4b13f78 fix Use of uninitialized value within @a2 in pattern match (m//) at BuildKitUtils.pm line 273 2013-12-27 01:53:35 -05:00
sjing
fc7d8ecee5 Support dns master/slave configuration 2013-12-26 22:57:24 -05:00
jjhua
f2817eff86 update the cookbook and role for swift 2013-12-26 15:05:36 -05:00
wanghuaz
c944543a1f fixing bug 3848, moving preuninstall script from prerequisite rpm to meta rpm to make sure it can be issued before componnet been uninstalled. 2013-12-26 15:23:56 +08:00
jjhua
c4522d9f5c update node dependency cookbook statsd for swift 2013-12-26 14:14:17 -05:00
jjhua
e0de4e6a26 support for swift 2013-12-26 13:06:40 -05:00
jjhua
60afd44afc if there are two roles for one chef-client, the script couldn't assign the
roles to the node. fixed it.
2013-12-26 12:39:02 -05:00
jjhua
f3aae68777 support for swift 2013-12-24 15:16:11 -05:00
nott
1afea00f08 template cleanup 2013-12-23 15:23:33 -05:00
jjhua
b55034f64c support for cinder 2013-12-23 15:37:45 -05:00
yinle
52098b8ad4 Add warning message on fsp wrong slp reply 2013-12-22 05:03:57 -08:00
immarvin
4920ba650d grep in busybox do not support long optipn string, use short instead 2013-12-19 19:13:10 -08:00
linggao
a3b589af67 Changed the table name from capacity to hwinv. 2013-12-19 22:51:39 -05:00
immarvin
891f41b6ba add rhels5.10 discinfo 2013-12-19 04:55:45 -08:00
immarvin
b767c8ae95 fix syntax error if is blank 2013-12-19 04:26:24 -08:00
immarvin
5b2c27eb75 grep in busybox do not support long optipn string, use short instead 2013-12-19 01:42:59 -08:00
daniceexi
29fa3ff010 add the second argument for genimage.bat to make it can generate winpe and BCD to a specific dir 2013-12-19 07:25:25 -05:00
immarvin
ce58c3bc1c fix the problem that rhels5.10 initrd cannot resolve the mn hostname 2013-12-18 23:19:35 -08:00
yinle
9584f0f683 Remove useless file build-debianrepo 2013-12-19 04:18:25 -08:00
daniceexi
a24cabf71c Rewirte the proxydhcp.c with proxydhcp-xcat in perl; Added site.installnic to control the nics setting for windows; Added servicenode.proxydhcp and noderes.proxydhcp to control the starting of proxydhcp-xcat daemon and makedhcp againsts node for windows deployment 2013-12-19 04:53:49 -05:00
yinle
eaf168ed48 Modify the bug that causing mklocalrepo.sh wrong 2013-12-19 00:57:32 -08:00
linggao
b68c12d07a fixed a syntax error 2013-12-19 08:34:10 -05:00
linggao
69bb732270 Added a capacity table to store the cpu, memory and disk sizes for nodes 2013-12-19 07:52:29 -05:00
Bruce Potter
70a8a07daa Merge branch '2.8' of ssh://git.code.sf.net/p/xcat/xcat-core into 2.8 2013-12-18 15:44:03 -05:00
Bruce Potter
61f2851006 fixed Error: at the beginning of monshow usage (feature request 73) 2013-12-18 15:43:15 -05:00
lissav
eebb88bbb1 defect 3916 2013-12-18 09:13:28 -05:00
sjing
8efb1c8f48 Support dns master/slave configuration 2013-12-18 01:35:15 -05:00
lissav
7b5055c77c defect 3916 2013-12-17 13:20:28 -05:00
daniceexi
0fe981ce6d make including winpostscript dir only for linux 2013-12-17 13:54:29 -05:00
jjhua
f3bc8f145a correct the errors in the allinone template(using single flat network) 2013-12-17 20:53:30 -05:00
jjhua
413ec01107 add the role os-block-storage-volume for cinder 2013-12-17 20:50:58 -05:00
jjhua
83f82660ca enhanced the role for cinder 2013-12-17 15:12:10 -05:00
jjhua
df0ec5b14b It will output some errors when using new chef/chef-server. The errors will not affect the
whole procedure. Update the cookbooks to revmoe the error mesages.
2013-12-17 14:50:17 -05:00
jjhua
b9c3d71d6f if we set up the chef-server during os provision, there are some error messages
in the /var/log/messages on the management node.  Fixed them.
2013-12-17 14:10:53 -05:00
daniceexi
50686d34c7 Make xCAT rpm to install all files in /install/winpostscripts/* for Windows support 2013-12-17 02:23:38 -05:00
daniceexi
5021289a86 Enhanced postscript support in templates 2013-12-16 03:53:47 -05:00
daniceexi
32f8a37a22 code drop for windows postscript/postbootscript support. The postscript/postbootscript should be set in node/osimage.postscript/postbootscript and copy to /install/winpostscripts before running nodeset 2013-12-16 02:22:02 -05:00
yinle
b958093474 Add lsslp unicast support 2013-12-13 01:26:52 -08:00
yinle
0922dc234c Add lsslp unicast support 2013-12-13 00:59:33 -08:00
yinle
acce8ea798 Add lsslp unicast support 2013-12-13 00:50:14 -08:00
lissav
d6b62f0eac fix defect 3942 2013-12-12 11:15:15 -05:00
immarvin
214852bcd7 liteimg use rc.statelite instead of rc.statelite.ppc.redhat for rhels6.5.ppc64 statelite 2013-12-12 02:13:02 -08:00
wanghuaz
d5a02a43a8 prevent the running of postbootscripts 2013-12-12 17:48:17 +08:00
wanghuaz
8238177b7f prevent the running of postbootscripts 2013-12-12 17:43:57 +08:00
wanghuaz
0284522bf1 prevent the running of postbootscripts 2013-12-12 17:37:48 +08:00
daniceexi
0ac6f5d4e8 skip the bmc interface for nics configuration 2013-12-10 07:17:51 -05:00
baiyuan
75a1115638 add test cases for addkit 2013-12-10 02:19:18 -05:00
daniceexi
01d1bee405 Mutiple winpes support. nodeset (Windows.pm) will generate configuration file (path of winpe) in /var/lib/xcat/proxydhcp.cfg and send signal to proxydhcp daemon, proxydhcp daemon loads configuration file and offers 4011 service to windows compute nodes. 2013-12-10 05:34:09 -05:00
wanghuaz
96a81f9911 fixing bug 3340: add test option for rmkit to list kitcomponents in use 2013-12-09 21:37:28 -08:00
wanghuaz
044febc97d fixing bug 3340: add test option for rmkit to list kitcomponents in use 2013-12-09 21:27:28 -08:00
zhaoertao
389396a5f5 modify the vmstorage value format of local path 2013-12-09 18:58:53 -08:00
Bruce Potter
fd9456c60c add valid values of kvm, esx, rhevm to nodehm.mgt and power attributes 2013-12-09 19:02:14 -05:00
Bruce Potter
e0dc206356 update description of litefile and litetree image attribute to include reference to image groups 2013-12-09 14:39:45 -05:00
immarvin
a60bb9d50c rhels6.5 support 2013-12-05 21:54:18 -08:00
ligc
7877dae894 fix for bug 3902: add bridge nics into dhcpd.conf, em\d+ for Fedora 2013-12-05 12:50:12 +08:00
ligc
6040b777d7 fix for bug 3922, use getNodesAttribs instead of getNodeAttribs 2013-12-05 09:47:39 +08:00
sjing
2ecf98530b Fix for bug 3912
update net-snmp rpm version
2013-12-04 04:01:39 -05:00
lissav
cdab9ccc32 fix lots of info in man page 2013-12-03 07:20:38 -05:00
sjing
58505be501 Fixed bug 3927
AIX bundle file can not recognize '#' in the middle of line.
2013-12-02 22:58:25 -05:00
daniceexi
5bc7502d61 code drop for feature to support multiple disks/paritions and multiple nics configuration for Windows deployment. 2013-12-03 02:26:31 -05:00
lissav
7d6d4cc690 Defect 3926, rerun of mysqlsetup -i leaves xcatd stopped 2013-12-02 06:42:27 -05:00
jjhua
672f7548a3 two environment template files if develop_mode=false 2013-12-02 16:36:19 -05:00
baiyuan
e037cd0b69 update linux.conf.template for autotest 2013-12-01 22:13:38 -05:00
jjhua
0da6df2117 at present, we will use "develop_mode=true", so I change the value
of develop_mode in the example to "true".
2013-11-29 16:16:57 -05:00
jjhua
1d39d95386 add two examples of the environment files 2013-11-29 14:55:27 -05:00
jjhua
25ef66f1d2 databags and related items in the openstack chef cookbooks.
These are some examples, they can work with the current cookbooks.
If there are some changes in the cookbooks, please update the
databags and related items
2013-11-29 14:44:34 -05:00
jjhua
f3925b9cf0 To support databag in openstack chef cookbook.
--nodevmode is only used when running all the procedure, and will
generate the secret, create the databag, and load the databag item
2013-11-29 14:32:42 -05:00
baiyuan
bcb80dc6c3 support multiple os version 2013-11-29 00:35:14 -05:00
baiyuan
5b5703e18b add test cases for buildkit 2013-11-28 21:53:31 -05:00
jjhua
59ca686eec To support developer_mode=false, we need to use databag. There was
a bug in rabbimq-server(could not change the guest's password). Fixed
it.
2013-11-28 13:50:07 -05:00
jjhua
54f2ee5abe add the role[os-network-dhcp-agent] into os-compute-single-controller.rb .
It could be used to allocate IP for the nova VMs when using role[allinone-compute]
2013-11-28 13:45:08 -05:00
Bruce Potter
3f6448b316 removed the nbroot rpms from the yum group file 2013-11-27 04:36:46 -05:00
Jarrod Johnson
d814b94bde Have ipmi do wire format, to match ipmitool and microsoft behavior in spite of the spec (which no one follows, not even prior xCAT code) 2013-11-26 10:26:51 -05:00
baiyuan
f83c398450 enhance os value in testcase,adding os:rhels,os:sles support in cases0 2013-11-24 20:53:58 -05:00
xq2005
47a010573d for bug 3919: version compare problem 2013-11-22 01:35:37 -08:00
xq2005
3b1f5fbbf2 update the ubuntu dep tarball 2013-11-21 20:26:30 -08:00
Bruce Potter
749909960a Merge branch '2.8' of ssh://git.code.sf.net/p/xcat/xcat-core into 2.8 2013-11-21 07:22:25 -05:00
zhaoertao
1166185575 remove 'sequential' parameter for runxcmd calling in configfpc.pm 2013-11-21 01:04:30 -08:00
zhaoertao
b5409af7ee fix bug 3889 xcatd not running preprocess for multiple plugins when mgt=ipmi 2013-11-21 00:51:00 -08:00
daniceexi
2ade33338a defect 3917: add support for running of postinstall script in mic genimage. The rootimage root is changed to overlay/rootimg from overlay/package 2013-11-21 05:17:23 -05:00
wanghuaz
6f25f60016 fixing the problem that configing bond0 flushed the default gateway. 2013-11-21 10:09:07 +08:00
Bruce Potter
9b4d72de32 Merge branch '2.8' of ssh://git.code.sf.net/p/xcat/xcat-core into 2.8 2013-11-19 07:19:31 -05:00
daniceexi
5161d143bc Change the man pages of nodeset,genimage and geninitrd commands for adding --ignorekernelchk option 2013-11-19 07:07:34 -05:00
ligc
c3f73bc9b8 Merge branch '2.8' of ssh://git.code.sf.net/p/xcat/xcat-core into 2.8 2013-11-19 15:55:45 +08:00
daniceexi
2b61564684 Code drop for new requirement: Add a new flag --ignorekernelchk for nodeset, geninitrd and genimage commands to skip the kernel version checking when injecting drivers from osimage.driverupdatesrc 2013-11-19 06:27:11 -05:00
ligc
089ea2da87 fix for bug 3913: do not use autocommit=0 for table read 2013-11-19 15:49:26 +08:00
Jarrod Johnson
004d179474 Fix SLES driver update media injection that is not rpm based 2013-11-18 16:31:15 -05:00
lissav
9578417eae defect 3870 2013-11-18 13:42:28 -05:00
Bruce Potter
fee08fb028 Merge branch '2.8' of ssh://git.code.sf.net/p/xcat/xcat-core into 2.8 2013-11-18 07:52:56 -05:00
lissav
0804cf1ae6 Defect 3906 2013-11-18 06:50:18 -05:00
jjhua
e432c238f9 fixed bug 3904, if the environements dir doesn't exsit, create it. 2013-11-18 16:26:03 -05:00
jjhua
c8053583bd fixed bug 3898 2013-11-18 15:37:15 -05:00
immarvin
43bc2d1e8e fix #3877 [FVT]genimage return confusing error message when xCAT-IBMhpc is not installed 2013-11-17 19:07:05 -08:00
immarvin
e2708df2b4 fix defect #3693 [DEV] rhels6.4-ppc64 statelite failed with (FATAL error: could not get the entries from litefile table...) when noderes.xcatmaster=<hostname of MN> 2013-11-17 18:48:00 -08:00
Bruce Potter
d91767af84 change version of 2.8 branch to 2.8.4 2013-11-15 16:46:40 -05:00
225 changed files with 10088 additions and 2155 deletions

View File

@@ -1 +1 @@
2.8.3
2.8.4

View File

@@ -1,241 +0,0 @@
#!/bin/sh
# build-debianrepo - Update GSA Ubuntu Repositories or create a local repository
#
# Author: Leonardo Tonetto (tonetto@linux.vnet.ibm.com)
# Revisor: Arif Ali (aali@ocf.co.uk)
#
# Builds the xcat-core and/or xcat-dep Debian packages from the given
# source trees, assembles reprepro repositories from the resulting .deb
# files, and either keeps them locally (-l) or uploads them to the GSA
# sftp server (-u/-p).
#
# After running this script, add the following line to
# /etc/apt/sources.list for local repository
#   deb file://<core_repo_path>/xcat-core/ maverick main
#   deb file://<dep_repo_path>/xcat-dep/ maverick main

# Supported distributions (reprepro codenames to publish under).
dists="squeeze"

a_flag=       # automatic flag - only rebuild when svn reports updates
c_flag=       # set when -c given: xcat-core (trunk-devel) path
d_flag=       # set when -d given: xcat-dep (trunk) path
local_flag=   # set when -l given: build the repository locally
gsa_id=       # GSA user id for the sftp upload (-u)
gsa_passwd=   # GSA password for the sftp upload (-p)

while getopts 'c:d:u:p:l:a' OPTION
do
    case $OPTION in
    c) c_flag=1
       xcat_core_path="$OPTARG"
       ;;
    d) d_flag=1
       xcat_dep_path="$OPTARG"
       ;;
    u) gsa_id="$OPTARG"        # BUGFIX: -u was accepted by getopts but never stored
       ;;
    p) gsa_passwd="$OPTARG"    # BUGFIX: -p was accepted by getopts but never stored
       ;;
    l) local_flag=1
       local_repo_path="$OPTARG"
       ;;
    a) a_flag=1
       ;;
    ?) printf "Usage: %s -c <core_trunk_path> [-d <dep_trunk_path>] -l <local-repo_path> [-a]\n" $(basename $0) >&2
       echo "-a Automatic: update only if there's any update on repo"
       exit 2
       ;;
    esac
done
shift $(($OPTIND - 1))

# At least one of -c / -d is required.
if [ -z "$c_flag" -a -z "$d_flag" ]
then
    printf "Usage: %s -c <core_trunk_path> [-d <dep_trunk_path>] { -l <local-repo_path> | [-u <gsa_id> -p <gsa_passwd>] } [-a]\n" $(basename $0) >&2
    echo "-a Automatic: update only if there's any update on repo"
    exit 2
fi

# Validate the source tree paths that were supplied.
if [ "$c_flag" ] && [ ! -d "$xcat_core_path" ]
then
    printf "%s: No such directory\n" "$xcat_core_path" >&2
    exit 2
fi
if [ "$d_flag" ]
then
    if [ ! -d "$xcat_dep_path" ]
    then
        printf "%s: No such directory\n" "$xcat_dep_path" >&2
        exit 2
    fi
fi

# Only local builds (-l) are supported by this version of the script;
# without -l we print usage and exit, which makes the GSA upload blocks
# below unreachable (kept for reference).
if [ "$local_flag" ]
then
    repo_xcat_core_path=$local_repo_path"/xcat-core"
    repo_xcat_dep_path=$local_repo_path"/xcat-dep"
else
    printf "Usage: %s -c <core_trunk_path> [-d <dep_trunk_path>] -l <local-repo_path> [-a]\n" $(basename $0) >&2
    echo "-a Automatic: update only if there's any update on repo"
    exit 2
fi

# With -a, rebuild xcat-core only when 'svn update' pulled changes.
if [ "$a_flag" ]
then
    touch svcupdate.trace
    SVCUP='svcupdate.trace'
    svn update $xcat_core_path 1> $SVCUP 2>&1
    if ! grep 'Tree is up to date' $SVCUP
    then
        update_core=1
    else
        update_core=
    fi
    rm -f $SVCUP
else
    update_core=1
fi

if [ "$c_flag" -a "$update_core" ]
then
    echo "###############################"
    echo "# Building xcat-core packages #"
    echo "###############################"
    CMD_PATH=`pwd`
    cd $xcat_core_path
    ./build-debs-all "snap" "Nightly_Builds"

    echo "#################################"
    echo "# Creating xcat-core repository #"
    echo "#################################"
    # Start from an empty repository directory.
    if [ -d $repo_xcat_core_path ]; then
        rm -rf $repo_xcat_core_path
    fi
    mkdir -p $repo_xcat_core_path/conf
    # Collect every .deb produced by the build.
    find . -iname '*.deb' -exec mv {} $repo_xcat_core_path \;
    rm -rf debs/
    cd $CMD_PATH

    # Write the reprepro distributions file, one stanza per codename.
    rm -rf $repo_xcat_core_path/conf/distributions
    for dist in $dists; do
        cat << __EOF__ >> $repo_xcat_core_path/conf/distributions
Origin: xCAT internal repository
Label: xcat-core bazaar repository
Codename: $dist
Architectures: amd64
Components: main
Description: Repository automatically genereted conf
__EOF__
    done

    # reprepro options file.
    cat << __EOF__ > $repo_xcat_core_path/conf/options
verbose
basedir .
__EOF__

    # Register every package with reprepro for each codename.
    for dist in $dists; do
        for file in `ls $repo_xcat_core_path/*.deb`; do
            reprepro -b $repo_xcat_core_path includedeb $dist $file;
        done
    done
    mv $xcat_core_path/latest_version $repo_xcat_core_path/xcat-core_latest-build

    # Helper script that points apt at this repository on the consumer.
    # BUGFIX: discard stderr with 2>/dev/null (was '2>null', which
    # created a literal file named "null").
    cat << '__EOF__' > $repo_xcat_core_path/mklocalrepo.sh
codename=`lsb_release -a 2>/dev/null | grep Codename | awk '{print $2}'`
cd `dirname $0`
echo deb file://"`pwd`" $codename main > /etc/apt/sources.list.d/xcat-core.list
__EOF__
    chmod 775 $repo_xcat_core_path/mklocalrepo.sh
    # The .debs are now tracked inside reprepro's pool; drop the copies.
    rm -rf $repo_xcat_core_path/*.deb

    if [ -z "$local_flag" ]
    then
        echo "###############################"
        echo "# Updating GSA xcat-core repo #"
        echo "###############################"
        lftp -e "mirror -R --delete-first $repo_xcat_core_path /projects/i/ipl-xcat/ubuntu/; exit;" -u $gsa_id,$gsa_passwd -p 22 sftp://ausgsa.ibm.com
    fi ### if [ -z "$local_flag" ]
fi ### if [ "$c_flag" -a "$update_core" ]

# With -a, rebuild xcat-dep only when 'svn update' pulled changes.
if [ "$a_flag" -a "$d_flag" ]
then
    touch svcupdate.trace
    SVCUP='svcupdate.trace'
    svn update $xcat_dep_path 1> $SVCUP 2>&1
    if ! grep 'Tree is up to date' $SVCUP
    then
        update_dep=1
    else
        update_dep=
    fi
    rm -f $SVCUP
else
    update_dep=1
fi

if [ "$d_flag" -a "$update_dep" ]
then
    echo "##############################"
    echo "# Building xcat-dep packages #"
    echo "##############################"
    CMD_PATH=`pwd`
    cd $xcat_dep_path
    ./build-debs-all "snap" "Nightly_Builds"

    echo "################################"
    echo "# Creating xcat-dep repository #"
    echo "################################"
    rm -rf $repo_xcat_dep_path
    mkdir -p $repo_xcat_dep_path/conf
    find $xcat_dep_path -iname '*.deb' -exec cp {} $repo_xcat_dep_path \;
    # BUGFIX: clear the dep repo's distributions file (the original
    # wrongly removed $repo_xcat_core_path/conf/distributions here).
    rm -rf $repo_xcat_dep_path/conf/distributions
    for dist in $dists; do
        cat << __EOF__ >> $repo_xcat_dep_path/conf/distributions
Origin: xCAT internal repository
Label: xcat-dep bazaar repository
Codename: $dist
Architectures: amd64
Components: main
Description: Repository automatically genereted conf
__EOF__
    done
    cat << __EOF__ > $repo_xcat_dep_path/conf/options
verbose
basedir .
__EOF__
    for dist in $dists; do
        for file in `ls $repo_xcat_dep_path/*.deb`; do
            reprepro -b $repo_xcat_dep_path includedeb $dist $file;
        done
    done
    # BUGFIX: discard stderr with 2>/dev/null (was '2>null').
    cat << '__EOF__' > $repo_xcat_dep_path/mklocalrepo.sh
codename=`lsb_release -a 2>/dev/null | grep Codename | awk '{print $2}'`
cd `dirname $0`
echo deb file://"`pwd`" $codename main > /etc/apt/sources.list.d/xcat-dep.list
__EOF__
    chmod 775 $repo_xcat_dep_path/mklocalrepo.sh
    rm -rf $repo_xcat_dep_path/*.deb

    if [ -z "$local_flag" ]
    then
        echo "##############################"
        echo "# Updating GSA xcat-dep repo #"
        echo "##############################"
        lftp -e "mirror -R --delete-first $repo_xcat_dep_path /projects/i/ipl-xcat/ubuntu/; exit;" -u $gsa_id,$gsa_passwd -p 22 sftp://ausgsa.ibm.com
    fi ### if [ -z "$local_flag" ]
fi ### if [ "$d_flag" -a "$update_dep" ]

# Delete the temporary upload staging area, if any, once the upload is done.
# NOTE(review): nothing in this version creates ./gsa-repo_temp -- presumably
# a leftover from an earlier upload flow; confirm before relying on it.
if [ -z "$local_flag" ]
then
    rm -rf ./gsa-repo_temp
fi
exit 0

View File

@@ -53,7 +53,7 @@ for i in $*; do
done
# Supported distributions
dists="maverick natty oneiric precise"
dists="maverick natty oneiric precise saucy"
c_flag= # xcat-core (trunk-delvel) path
d_flag= # xcat-dep (trunk) path
@@ -276,7 +276,7 @@ __EOF__
done
#create the mklocalrepo script
cat << __EOF__ > mklocalrepo.sh
cat << '__EOF__' > mklocalrepo.sh
. /etc/lsb-release
cd `dirname $0`
echo deb file://"`pwd`" $DISTRIB_CODENAME main > /etc/apt/sources.list.d/xcat-core.list
@@ -413,6 +413,10 @@ __EOF__
while [ $((i+=1)) -le 5 ] && ! rsync -urLv --delete xcat-dep ${uploader},xcat@web.sourceforge.net:${sf_dir}/ubuntu/
do : ; done
#upload the tarball
i=0
echo "Uploading $dep_tar_name to ${sf_dir}/xcat-dep/2.x_Ubuntu/ ..."
while [ $((i+=1)) -le 5 ] && ! rsync -v $dep_tar_name ${uploader},xcat@web.sourceforge.net:${sf_dir}/xcat-dep/2.x_Ubuntu/
do : ; done
cd $old_pwd
fi
exit 0

View File

@@ -10,9 +10,6 @@
<packagereq type="required">xCAT-server</packagereq>
<packagereq type="required">xCAT-client</packagereq>
<packagereq type="required">perl-xCAT</packagereq>
<packagereq type="required">xCAT-nbroot-core-x86_64</packagereq>
<packagereq type="required">xCAT-nbroot-core-x86</packagereq>
<packagereq type="optional">xCAT-nbroot-core-ppc64</packagereq>
</packagelist>
</group>
</comps>

View File

@@ -75,6 +75,7 @@ function makexcat {
tar --exclude .svn --exclude upflag -czf $RPMROOT/SOURCES/postscripts.tar.gz postscripts LICENSE.html
tar --exclude .svn -czf $RPMROOT/SOURCES/prescripts.tar.gz prescripts
tar --exclude .svn -czf $RPMROOT/SOURCES/templates.tar.gz templates
tar --exclude .svn -czf $RPMROOT/SOURCES/winpostscripts.tar.gz winpostscripts
cp xcat.conf $RPMROOT/SOURCES
cp xCATMN $RPMROOT/SOURCES
cd - >/dev/null

View File

@@ -223,6 +223,7 @@ if (ref($request) eq 'HASH') { # the request is an array, not pure XML
SSL_key_file => $keyfile,
SSL_cert_file => $certfile,
SSL_ca_file => $cafile,
SSL_verify_mode => SSL_VERIFY_PEER,
SSL_use_cert => 1,
Timeout => 0,
);

View File

@@ -618,7 +618,7 @@ sub getDBtable
{
# need to get info from DB
my $thistable = xCAT::Table->new($table, -create => 1, -autocommit => 0);
my $thistable = xCAT::Table->new($table, -create => 1);
if (!$thistable)
{
return undef;

View File

@@ -15,6 +15,7 @@ use xCAT::PPCcli qw(SUCCESS EXPECT_ERROR RC_ERROR NR_ERROR);
use xCAT::Usage;
use xCAT::NodeRange;
use xCAT::FSPUtils;
use xCAT::VMCommon;
#use Data::Dumper;
use xCAT::MsgUtils qw(verbose_message);
##############################################
@@ -52,7 +53,7 @@ sub chvm_parse_extra_options {
my $args = shift;
my $opt = shift;
# Partition used attributes #
my @support_ops = qw(vmcpus vmmemory vmphyslots vmothersetting);
my @support_ops = qw(vmcpus vmmemory vmphyslots vmothersetting vmstorage vmnics del_vadapter);
if (ref($args) ne 'ARRAY') {
return "$args";
}
@@ -84,6 +85,24 @@ sub chvm_parse_extra_options {
$opt->{bsr} = $1;
}
next;
} elsif ($cmd eq "vmstorage") {
if (exists($opt->{vios})) {
if ($value !~ /\d+/) {
return "'$value' is invalid, must be numbers";
} else {
my @array = ();
for (1..$value) {
push @array, 0;
}
$value = \@array;
}
} else {
if ($value =~ /^([\w_-]*):(\d+)$/) {
$value = ["0,$1:$2"];
} else {
return "'$value' is invalid, must be in form of 'Server_name:slotnum'";
}
}
}
} else {
@@ -124,7 +143,7 @@ sub chvm_parse_args {
$Getopt::Long::ignorecase = 0;
Getopt::Long::Configure( "bundling" );
if ( !GetOptions( \%opt, qw(V|verbose p=s i=s m=s r=s p775) )) {
if ( !GetOptions( \%opt, qw(V|verbose p=s i=s m=s r=s p775 vios) )) {
return( usage() );
}
####################################
@@ -398,7 +417,7 @@ sub mkvm_parse_args {
push @unsupport_ops, $tmpop;
}
}
my @support_ops = qw(vmcpus vmmemory vmphyslots vmothersetting);
my @support_ops = qw(vmcpus vmmemory vmphyslots vmothersetting vmnics vmstorage);
if (defined(@ARGV[0]) and defined($opt{full})) {
return(usage("Option 'full' shall be used alone."));
} elsif (defined(@ARGV[0])) {
@@ -711,6 +730,45 @@ sub do_op_extra_cmds {
$action = "part_set_lpar_pending_proc";
} elsif ($op eq "vmphyslots") {
$action = "set_io_slot_owner_uber";
} elsif ($op eq "del_vadapter") {
$action = "part_clear_vslot_config";
} elsif ($op eq "vmnics") {
my @vlans = split /,/,$param;
foreach (@vlans) {
if (/vlan(\d+)/i) {
my $vlanid = $1;
my $mac = lc(xCAT::VMCommon::genMac($name));
if ($mac =~ /(..):(..):(..):(..):(..):(..)/) {
my $tail = hex($6)+$vlanid;
$mac = sprintf("$1$2$3$4$5%02x",$tail);
}
my $value = xCAT::FSPUtils::fsp_api_action($request,$name, $d, "part_set_veth_slot_config",0,"0,$vlanid,$mac");
if (@$value[1] && ((@$value[1] =~ /Error/i) && (@$value[2] ne '0'))) {
return ([[$name, @$value[1], '1']]) ;
} else {
push @values, [$name, "Success", '0'];
}
}
}
next;
} elsif ($op eq "vmstorage") {
foreach my $v_info (@$param) {
if ($v_info =~ /(\d+),([\w_-]*):(\d+)/) {
my $vios = &find_lpar_id($request, @$d[3], $2);
my $r_slotid = $3;
if (!defined($vios)) {
return ([[$name, "Cannot find lparid for Server lpar:$1", '1']]);
}
$v_info = "$1,$vios,$r_slotid";
}
my $value = xCAT::FSPUtils::fsp_api_action($request,$name, $d, "part_set_vscsi_slot_config",0,$v_info);
if (@$value[1] && ((@$value[1] =~ /Error/i) && (@$value[2] ne '0'))) {
return ([[$name, @$value[1], '1']]) ;
} else {
push @values, [$name, "Success", '0'];
}
}
next;
} elsif ($op eq "vmmemory") {
my @td = @$d;
@td[0] = 0;
@@ -750,6 +808,9 @@ sub do_op_extra_cmds {
$action = "part_set_lpar_pending_mem";
} elsif ($op eq "bsr") {
$action = "set_lpar_bsr";
} elsif ($op eq "vios") {
print __LINE__."=========>op=vios===\n";
next;
} else {
last;
}
@@ -1638,6 +1699,34 @@ sub query_cec_info_actions {
#$data .= "\n";
next;
}
if ($action eq "part_get_all_vio_info") {
my @output = split /\n/, @$values[1];
my ($drc_index,$drc_name);
foreach my $line (@output) {
chomp($line);
if ($line =~ /Index:.*drc_index:([^,]*),\s*drc_name:(.*)$/) {
$drc_index = $1;
$drc_name = $2;
next;
} elsif ($line =~ /\s*lpar_id=(\d+),type=(vSCSI|vSerial),slot=(\d+),attr=(\d+).*remote_lpar_id=(0x\w+),remote_slot_num=(0x\w+)/) {
if ($4 eq '0') {
push @array, [$name, "$1,$3,$drc_name,$drc_index,$2 Client(Server_lparid=$5,Server_slotid=$6)", 0];
} else {
push @array, [$name, "$1,$3,$drc_name,$drc_index,$2 Server", 0];
}
} elsif ($line =~ /\s*lpar_id=(\d+),type=(vEth),slot=(\d+).*port_vlan_id=(\d+),mac_addr=(\w+)/) {
push @array, [$name, "$1,$3,$drc_name,$drc_index,$2 (port_vlanid=$4,mac_addr=$5)", 0];
#} elsif ($line =~ /\s*lpar_id=(\d+),type=(\w+),slot=(\d+)/) {
# push @array, [$name, "$1,$3,$drc_name,$drc_index,$2", 0];
#} else {
#print "=====>line:$line\n";
#push @array, [$name, $line, 0];
}
$drc_index = '';
$drc_name = '';
}
next;
}
}
#$data .= "@$values[1]\n\n";
push @array, [$name, @$values[1], @$values[2]];
@@ -1660,14 +1749,16 @@ sub query_cec_info {
my $args = $request->{opt};
my @td = ();
my @result = ();
#print Dumper($request);
#print Dumper($hash);
while (my ($mtms,$h) = each(%$hash) ) {
while (my ($name, $d) = each (%$h)) {
@td = @$d;
if (@$d[0] == 0 && @$d[4] ne "lpar") {
if (@$d[0] == 0 && @$d[4] !~ /lpar|vios/) {
last;
}
#my $rethash = query_cec_info_actions($request, $name, $d, 0, ["part_get_lpar_processing","part_get_lpar_memory","part_get_all_vio_info","lpar_lhea_mac","part_get_all_io_bus_info","get_huge_page","get_cec_bsr"]);
my $rethash = query_cec_info_actions($request, $name, $d, 0, ["part_get_lpar_processing","part_get_lpar_memory","part_get_all_io_bus_info","get_huge_page","get_cec_bsr"]);
my $rethash = query_cec_info_actions($request, $name, $d, 0, ["part_get_lpar_processing","part_get_lpar_memory","part_get_all_io_bus_info","part_get_all_vio_info","get_huge_page","get_cec_bsr"]);
#push @result, [$name, $rethash, 0];
push @result, @$rethash;
}
@@ -1766,7 +1857,7 @@ sub deal_with_avail_mem {
} else {
$cur_avail = $lparhash->{hyp_avail_mem} + $used_regions - $tmphash{lpar0_used_mem};
}
xCAT::MsgUtils->verbose_message($request, "====****====used:$used_regions,avail:$cur_avail,($min:$cur:$max).");
#xCAT::MsgUtils->verbose_message($request, "====****====used:$used_regions,avail:$cur_avail,($min:$cur:$max).");
if ($cur_avail < $min) {
return([$name, "Parse reserverd regions failed, no enough memory, available:$lparhash->{hyp_avail_mem}.", 1]);
}
@@ -1781,6 +1872,17 @@ sub deal_with_avail_mem {
return 0;
}
sub find_lpar_id {
my $request = shift;
my $parent = shift;
my $name = shift;
my %mapping = %{$request->{ppc}->{$parent}->{mapping}};
if (exists($mapping{$name})) {
return $mapping{$name};
}
return undef;
}
sub create_lpar {
my $request = shift;
my $name = shift;
@@ -1804,12 +1906,42 @@ sub create_lpar {
xCAT::FSPUtils::fsp_api_action($request, $name, $d, "part_set_lpar_group_id");
xCAT::FSPUtils::fsp_api_action($request, $name, $d, "part_set_lpar_avail_priority");
#print "======>physlots:$lparhash->{physlots}.\n";
$values = xCAT::FSPUtils::fsp_api_action($request, $name, $d, "set_io_slot_owner_uber", 0, $lparhash->{physlots});
#$values = xCAT::FSPUtils::fsp_api_action($request, $name, $d, "set_io_slot_owner", 0, join(",",@phy_io_array));
if (@$values[2] ne 0) {
&set_lpar_undefined($request, $name, $d);
return ([$name, @$values[1], @$values[2]]);
if (exists($lparhash->{physlots})) {
$values = xCAT::FSPUtils::fsp_api_action($request, $name, $d, "set_io_slot_owner_uber", 0, $lparhash->{physlots});
#$values = xCAT::FSPUtils::fsp_api_action($request, $name, $d, "set_io_slot_owner", 0, join(",",@phy_io_array));
if (@$values[2] ne 0) {
&set_lpar_undefined($request, $name, $d);
return ([$name, @$values[1], @$values[2]]);
}
}
if (exists($lparhash->{nics})) {
my @vlans = split /,/,$lparhash->{nics};
foreach (@vlans) {
if (/vlan(\d+)/i) {
my $vlanid = $1;
my $mac = lc(xCAT::VMCommon::genMac($name));
if ($mac =~ /(..):(..):(..):(..):(..):(..)/) {
my $tail = hex($6)+$vlanid;
$mac = sprintf("$1$2$3$4$5%02x",$tail);
}
$values = xCAT::FSPUtils::fsp_api_action($request,$name, $d, "part_set_veth_slot_config",0,"0,$vlanid,$mac");
if (@$values[2] ne 0) {
&set_lpar_undefined($request, $name, $d);
return ([$name, @$values[1], @$values[2]]);
}
}
}
}
if (exists($lparhash->{storage})) {
foreach my $v_info (@{$lparhash->{storage}}) {
$values = xCAT::FSPUtils::fsp_api_action($request,$name, $d, "part_set_vscsi_slot_config",0,$v_info);
if (@$values[2] ne 0) {
&set_lpar_undefined($request, $name, $d);
return ([$name, @$values[1], @$values[2]]);
}
}
}
# ====== ====== #
if (exists($lparhash->{phy_hea})) {
my $phy_hash = $lparhash->{phy_hea};
foreach my $phy_drc (keys %$phy_hash) {
@@ -1850,6 +1982,7 @@ sub create_lpar {
&set_lpar_undefined($request, $name, $d);
return ([$name, @$values[1], @$values[2]]);
}
xCAT::FSPUtils::fsp_api_action($request, $name, $d, "part_set_lpar_comp_modes");
#print "======>memory:$lparhash->{huge_page}.\n";
xCAT::FSPUtils::fsp_api_action($request, $name, $d, "set_huge_page", 0, $lparhash->{huge_page});
@@ -1866,7 +1999,6 @@ sub create_lpar {
}
return ([$name, "Done", 0]);
}
sub mkspeclpar {
my $request = shift;
my $hash = shift;
@@ -1880,7 +2012,7 @@ sub mkspeclpar {
while (my ($mtms, $h) = each (%$hash)) {
my $memhash;
my @nodes = keys(%$h);
my $ent = $vmtab->getNodesAttribs(\@nodes, ['cpus', 'memory','physlots', 'othersettings']);
my $ent = $vmtab->getNodesAttribs(\@nodes, ['cpus', 'memory','physlots', 'othersettings', 'storage', 'nics']);
while (my ($name, $d) = each (%$h)) {
if (@$d[4] ne 'lpar') {
push @result, [$name, "Node must be LPAR", 1];
@@ -1889,7 +2021,7 @@ sub mkspeclpar {
if (!exists($memhash->{run})) {
my @td = @$d;
@td[0] = 0;
$memhash = &query_cec_info_actions($request, $name, \@td, 1, ["part_get_hyp_process_and_mem","lpar_lhea_mac"]);
$memhash = &query_cec_info_actions($request, $name, \@td, 1, ["part_get_hyp_process_and_mem","lpar_lhea_mac","part_get_all_io_bus_info"]);
$memhash->{run} = 1;
}
my $tmp_ent = $ent->{$name}->[0];
@@ -1902,13 +2034,50 @@ sub mkspeclpar {
if (exists($opt->{vmphyslots})) {
$tmp_ent->{physlots} = $opt->{vmphyslots};
}
if (exists($opt->{vmothersetting})) {
$tmp_ent->{othersettings} = $opt->{vmothersetting};
}
if (exists($opt->{vmstorage})) {
$tmp_ent->{storage} = $opt->{vmstorage};
}
if (exists($opt->{vmnics})) {
$tmp_ent->{nics} = $opt->{vmnics};
}
if (exists($opt->{vios})) {
if (!exists($tmp_ent->{physlots})) {
my @phy_io_array = keys(%{$memhash->{bus}});
$tmp_ent->{physlots} = join(",", @phy_io_array);
}
if (exists($tmp_ent->{storage}) and $tmp_ent->{storage} !~ /^\d+$/) {
return ([[$name, "Parameter for 'vmstorage' is invalid", 1]]);
} elsif (exists($tmp_ent->{storage})) {
my $num = $tmp_ent->{storage};
my @array = ();
for (1..$num) {
push @array, '0';
}
$tmp_ent->{storage} = \@array;
}
} else {
if (exists($tmp_ent->{storage}) and $tmp_ent->{storage} !~ /^[\w_-]*:\d+$/) {
return ([[$name, "Parameter for 'vmstorage' is invalid", 1]]);
} elsif (exists($tmp_ent->{storage})) {
if ($tmp_ent->{storage} =~ /([\w_-]*):(\d+)/) {
my $vios = &find_lpar_id($request, @$d[3], $1);
my $r_slotid = $2;
if (!defined($vios)) {
return ([[$name, "Cannot find lparid for Server lpar:$1"]]);
}
$tmp_ent->{storage} = ["0,$vios,$r_slotid"];
}
}
}
if (!defined($tmp_ent) ) {
return ([[$name, "Not find params", 1]]);
} elsif (!exists($tmp_ent->{cpus}) || !exists($tmp_ent->{memory}) || !exists($tmp_ent->{physlots})) {
return ([[$name, "The attribute 'vmcpus', 'vmmemory' and 'vmphyslots' are all needed to be specified.", 1]]);
#} elsif (!exists($tmp_ent->{cpus}) || !exists($tmp_ent->{memory}) || !exists($tmp_ent->{physlots})) {
} elsif (!exists($tmp_ent->{cpus}) || !exists($tmp_ent->{memory})) {
return ([[$name, "The attribute 'vmcpus', 'vmmemory' are needed to be specified.", 1]]);
}
if ($tmp_ent->{memory} =~ /(\d+)([G|M]?)\/(\d+)([G|M]?)\/(\d+)([G|M]?)/i) {
my $memsize = $memhash->{mem_region_size};

View File

@@ -56,6 +56,7 @@ $::STATUS_SHELL="shell";
$::STATUS_DEFINED="defined";
$::STATUS_UNKNOWN="unknown";
$::STATUS_FAILED="failed";
$::STATUS_BMCREADY="bmcready";
%::VALID_STATUS_VALUES = (
$::STATUS_ACTIVE=>1,
$::STATUS_INACTIVE=>1,
@@ -72,6 +73,7 @@ $::STATUS_FAILED="failed";
$::STATUS_DEFINED=>1,
$::STATUS_UNKNOWN=>1,
$::STATUS_FAILED=>1,
$::STATUS_BMCREADY=>1,
$::STATUS_SYNCING=>1,
$::STATUS_OUT_OF_SYNC=>1,

View File

@@ -1353,6 +1353,16 @@ sub dolitesetup
return 1;
}
# also copy $instrootloc/.statelite contents
$ccmd = "/usr/bin/cp -p -r $instrootloc/.statelite $SRloc";
$out = xCAT::Utils->runcmd("$ccmd", -1);
if ($::RUNCMD_RC != 0)
{
my $rsp;
push @{$rsp->{data}}, "Could not copy $instrootloc/.statelite to $SRloc.";
xCAT::MsgUtils->message("E", $rsp, $callback);
return 1;
}
}
}
}

View File

@@ -827,7 +827,7 @@ sub get_mac_addr {
$done[0] = 0;
$cmd[0] = "\" local-mac-address\" ". $phandle . " get-package-property\r";
$msg[0] = "Status: return code and mac-address now on stack\n";
$pattern[0] = "ok";#"\s*3 >";
$pattern[0] = "local-mac-address.*ok";#"\s*3 >";
$newstate[0] = 1;
# cmd(1) is a dot (.). This is a stack manipulation command that removes one
@@ -1231,8 +1231,8 @@ sub ping_server{
$done[2] = 0;
$cmd[2] = "dev /packages/net\r";
$msg[2] = "Status: selected the /packages/net node as the active package\n";
#$pattern[2] = ".*dev(.*)ok(.*)0 >(.*)";
$pattern[2] = "ok";
$pattern[2] = ".*dev.*packages.*net(.*)ok(.*)0 >(.*)";
#$pattern[2] = "ok";
$newstate[2]= 3;
# state 3, ping the server
@@ -1266,6 +1266,7 @@ sub ping_server{
# state 5, all done
$done[5] = 1;
# for ping, only need to set speed and duplex for ethernet adapters
#
if ( $list_type eq "ent" ) {
@@ -1323,8 +1324,10 @@ sub ping_server{
$timeout = 300;
while ( $done[$state] eq 0 ) {
send_command($verbose, $rconsole, $cmd[$state]);
@result = $rconsole->expect(
$timeout,
[qr/$pattern[$state]/s=>
sub {
@@ -1362,7 +1365,9 @@ sub ping_server{
}
],
);
return 1 if ($rc eq 1);
return 1 if ($rc eq 1);
if ( $state eq 1 ) {
$adap_conn = $adap_conn_list[$j];
$cmd[1] = "\" ethernet,$adap_speed,$adap_conn,$adap_duplex\" encode-string \" chosen-network-type\" property\r";
@@ -2050,14 +2055,46 @@ sub multiple_open_dev {
; \r";
send_command($verbose, $rconsole, $command);
$command = "patch new-open-dev open-dev net-ping \r";
send_command($verbose, $rconsole, $command);
$timeout = 30;
$rconsole->expect(
$timeout,
#[qr/patch new-open-dev(.*)>/=>
[qr/>/=>
[qr/new-open-dev(.*)ok/=>
#[qr/>/=>
sub {
nc_msg($verbose, "Status: at End of multiple_open_dev \n");
$rconsole->clear_accum();
}
],
[qr/]/=>
sub {
nc_msg($verbose, "Unexpected prompt\n");
$rconsole->clear_accum();
$rc = 1;
}
],
[timeout =>
sub {
send_user(2, "Timeout\n");
$rconsole->clear_accum();
$rc = 1;
}
],
[eof =>
sub {
send_user(2, "Cannot connect to $node\n");
$rconsole->clear_accum();
$rc = 1;
}
],
);
$command = "patch new-open-dev open-dev net-ping \r";
send_command($verbose, $rconsole, $command);
$rconsole->expect(
$timeout,
[qr/patch new-open-dev(.*)ok/=>
#[qr/>/=>
sub {
nc_msg($verbose, "Status: at End of multiple_open_dev \n");
$rconsole->clear_accum();
@@ -2086,6 +2123,7 @@ sub multiple_open_dev {
}
],
);
return $rc;
}
###################################################################
@@ -2569,7 +2607,7 @@ sub lparnetbootexp
####################################
nc_msg($verbose, "Connecting to the $node.\n");
sleep 3;
$timeout = 2;
$timeout = 10;
$rconsole->expect(
$timeout,
[ qr/Enter.* for help.*/i =>
@@ -2778,6 +2816,8 @@ sub lparnetbootexp
$done = 0;
$retry_count = 0;
$timeout = 10;
while (!$done) {
my @result = $rconsole->expect(
$timeout,
@@ -2885,6 +2925,7 @@ sub lparnetbootexp
}
}
##############################
# Call multiple_open_dev to
# circumvent firmware OPEN-DEV
@@ -2919,6 +2960,7 @@ sub lparnetbootexp
$match_pat = ".*";
}
if($colon) {
nc_msg($verbose, "#Type:Location_Code:MAC_Address:Full_Path_Name:Ping_Result:Device_Type:Size_MB:OS:OS_Version:\n");
$outputarrayindex++; # start from 1, 0 is used to set as 0
@@ -2972,7 +3014,7 @@ sub lparnetbootexp
} else {
for( $i = 0; $i < $adapter_found; $i++) {
if ($adap_type[$i] =~ /$match_pat/) {
if ($adap_type[$i] eq "hfi-ent") {
if (!($adap_type[$i] eq "hfi-ent")) {
$mac_address = get_mac_addr($phandle_array[$i], $rconsole, $node, $verbose);
$loc_code = get_adaptr_loc($phandle_array[$i], $rconsole, $node, $verbose);
}

View File

@@ -149,7 +149,7 @@ sub nodesbycriteria {
}
if ($neednewcache) {
if ($nodelist) {
$nodelist->_clear_cache();
#$nodelist->_clear_cache();
$nodelist->_build_cache(\@cachedcolumns);
}
}

View File

@@ -265,6 +265,7 @@ sub rackformat_to_numricformat{
values are attributes of a specific nic, like:
type : nic type
hostnamesuffix: hostname suffix
hostnameprefix: hostname prefix
customscript: custom script for this nic
network: network name for this nic
ip: ip address of this nic.
@@ -276,7 +277,7 @@ sub get_nodes_nic_attrs{
my $nodes = shift;
my $nicstab = xCAT::Table->new( 'nics');
my $entry = $nicstab->getNodesAttribs($nodes, ['nictypes', 'nichostnamesuffixes', 'niccustomscripts', 'nicnetworks', 'nicips']);
my $entry = $nicstab->getNodesAttribs($nodes, ['nictypes', 'nichostnamesuffixes', 'nichostnameprefixes', 'niccustomscripts', 'nicnetworks', 'nicips']);
my %nicsattrs;
my @nicattrslist;
@@ -308,6 +309,20 @@ sub get_nodes_nic_attrs{
}
}
if($entry->{$node}->[0]->{'nichostnameprefixes'}){
@nicattrslist = split(",", $entry->{$node}->[0]->{'nichostnameprefixes'});
foreach (@nicattrslist){
my @nicattrs;
if ($_ =~ /!/) {
@nicattrs = split("!", $_);
} else {
@nicattrs = split(":", $_);
}
$nicsattrs{$node}{$nicattrs[0]}{'hostnameprefix'} = $nicattrs[1];
}
}
if($entry->{$node}->[0]->{'niccustomscripts'}){
@nicattrslist = split(",", $entry->{$node}->[0]->{'niccustomscripts'});
foreach (@nicattrslist){
@@ -733,6 +748,27 @@ sub get_imageprofile_prov_method
#-------------------------------------------------------------------------------
=head3 get_imageprofile_prov_osvers
Description : Get A node's provisioning os version and profile from its imageprofile attribute.
Arguments : $imgprofilename - imageprofile name
Returns : node's osversion and profile
=cut
#-------------------------------------------------------------------------------
sub get_imageprofile_prov_osvers
{
my $class = shift;
my $imgprofilename = shift;
my $osimgtab = xCAT::Table->new('osimage');
my $osimgentry = ($osimgtab->getAllAttribsWhere("imagename = '$imgprofilename'", 'ALL' ))[0];
my $osversion = $osimgentry->{'osvers'};
my $profile = $osimgentry->{'profile'};
return ($osversion, $profile);
}
#-------------------------------------------------------------------------------
=head3 check_profile_consistent
Description : Check if three profile consistent
Arguments : $imageprofile - image profile name
@@ -1000,6 +1036,40 @@ sub parse_nodeinfo_file
return 1, "";
}
#-------------------------------------------------------
=head3 update the table prodkey, in order to support windows
per node license key
Returns: $retcode.
$retcode = 1. update failed, the value is undef
$retcode = 0. save into db is OK..
=cut
#-------------------------------------------------------
sub update_windows_prodkey
{
my $class = shift;
my $node = shift;
my $product = shift;
my $key = shift;
unless(defined($node) && defined($product) && defined($key))
{
return 1;
}
# please notice this db usage
my %keyhash;
my %updates;
$keyhash{'node'} = $node;
$updates{'product'} = $product;
$updates{'key'} = $key;
my $tab = xCAT::Table->new('prodkey', -create=>1, -autocommit=>0);
$tab->setAttribs( \%keyhash,\%updates );
$tab->commit;
$tab->close;
return 0;
}
#-------------------------------------------------------------------------------
=head3 check_nicips
Description: Check if the nicips defined in MAC file is correct

1017
perl-xCAT/xCAT/SLP.pm Normal file → Executable file

File diff suppressed because it is too large Load Diff

388
perl-xCAT/xCAT/Schema.pm Normal file → Executable file
View File

@@ -134,7 +134,7 @@ litetree => {
table_desc => 'Directory hierarchy to traverse to get the initial contents of node files. The files that are specified in the litefile table are searched for in the directories specified in this table.',
descriptions => {
priority => 'This number controls what order the directories are searched. Directories are searched from smallest priority number to largest.',
image => "The name of the image that will use this directory, as specified in the osimage table. If image is not supplied, the default is 'ALL'. 'ALL' means use it for all images.",
image => "The name of the image (as specified in the osimage table) that will use this directory. You can also specify an image group name that is listed in the osimage.groups attribute of some osimages. 'ALL' means use this row for all images.",
directory => 'The location (hostname:path) of a directory that contains files specified in the litefile table. Variables are allowed. E.g: $noderes.nfsserver://xcatmasternode/install/$node/#CMD=uname-r#/',
mntopts => "A comma-separated list of options to use when mounting the litetree directory. (Ex. 'soft') The default is to do a 'hard' mount.",
comments => 'Any user-written notes.',
@@ -148,7 +148,7 @@ litefile => {
required => [qw(image file)], # default type is rw nfsroot
table_desc => 'The litefile table specifies the directories and files on the statelite nodes that should be readwrite, persistent, or readonly overlay. All other files in the statelite nodes come from the readonly statelite image.',
descriptions => {
image => "The name of the image that will use these files, as specified in the osimage table. 'ALL' means use it for all images.",
image => "The name of the image (as specified in the osimage table) that will use these options on this dir/file. You can also specify an image group name that is listed in the osimage.groups attribute of some osimages. 'ALL' means use this row for all images.",
file => "The full pathname of the file. e.g: /etc/hosts. If the path is a directory, then it should be terminated with a '/'. ",
options => "Options for the file:\n\n".
qq{ tmpfs - It is the default option if you leave the options column blank. It provides a file or directory for the node to use when booting, its permission will be the same as the original version on the server. In most cases, it is read-write; however, on the next statelite boot, the original version of the file or directory on the server will be used, it means it is non-persistent. This option can be performed on files and directories..\n\n}.
@@ -200,7 +200,7 @@ vm => {
'mgr' => 'The function manager for the virtual machine',
'host' => 'The system that currently hosts the VM',
'migrationdest' => 'A noderange representing candidate destinations for migration (i.e. similar systems, same SAN, or other criteria that xCAT can use',
'storage' => 'A list of storage files or devices to be used. i.e. /cluster/vm/<nodename> or nfs://<server>/path/to/folder/',
'storage' => 'A list of storage files or devices to be used. i.e. dir:///cluster/vm/<nodename> or nfs://<server>/path/to/folder/',
'storagemodel' => 'Model of storage devices to provide to guest',
'cfgstore' => 'Optional location for persistant storage separate of emulated hard drives for virtualization solutions that require persistant store to place configuration data',
'memory' => 'Megabytes of memory the VM currently should be set to.',
@@ -538,8 +538,8 @@ nodehm => {
table_desc => "Settings that control how each node's hardware is managed. Typically, an additional table that is specific to the hardware type of the node contains additional info. E.g. the ipmi, mp, and ppc tables.",
descriptions => {
node => 'The node name or group name.',
power => 'The method to use to control the power of the node. If not set, the mgt attribute will be used. Valid values: ipmi, blade, hmc, ivm, fsp. If "ipmi", xCAT will search for this node in the ipmi table for more info. If "blade", xCAT will search for this node in the mp table. If "hmc", "ivm", or "fsp", xCAT will search for this node in the ppc table.',
mgt => 'The method to use to do general hardware management of the node. This attribute is used as the default if power or getmac is not set. Valid values: ipmi, blade, hmc, ivm, fsp, bpa. See the power attribute for more details.',
power => 'The method to use to control the power of the node. If not set, the mgt attribute will be used. Valid values: ipmi, blade, hmc, ivm, fsp, kvm, esx, rhevm. If "ipmi", xCAT will search for this node in the ipmi table for more info. If "blade", xCAT will search for this node in the mp table. If "hmc", "ivm", or "fsp", xCAT will search for this node in the ppc table.',
mgt => 'The method to use to do general hardware management of the node. This attribute is used as the default if power or getmac is not set. Valid values: ipmi, blade, hmc, ivm, fsp, bpa, kvm, esx, rhevm. See the power attribute for more details.',
cons => 'The console method. If nodehm.serialport is set, this will default to the nodehm.mgt setting, otherwise it defaults to unused. Valid values: cyclades, mrv, or the values valid for the mgt attribute.',
termserver => 'The hostname of the terminal server.',
termport => 'The port number on the terminal server that this node is connected to.',
@@ -554,7 +554,7 @@ nodehm => {
},
},
nodelist => {
cols => [qw(node groups status statustime appstatus appstatustime primarysn hidden updatestatus updatestatustime comments disable)],
cols => [qw(node groups status statustime appstatus appstatustime primarysn hidden updatestatus updatestatustime zonename comments disable)],
keys => [qw(node)],
tablespace =>'XCATTBS32K',
table_desc => "The list of all the nodes in the cluster, including each node's current status and what groups it is in.",
@@ -569,6 +569,7 @@ nodelist => {
hidden => "Used to hide fsp and bpa definitions, 1 means not show them when running lsdef and nodels",
updatestatus => "The current node update status. Valid states are synced out-of-sync,syncing,failed.",
updatestatustime => "The date and time when the updatestatus was updated.",
zonename => "The name of the zone to which the node is currently assigned. If undefined, then it is not assigned to any zone. ",
comments => 'Any user-written notes.',
disable => "Set to 'yes' or '1' to comment out this row.",
},
@@ -591,7 +592,7 @@ nodepos => {
},
},
noderes => {
cols => [qw(node servicenode netboot tftpserver tftpdir nfsserver monserver nfsdir installnic primarynic discoverynics cmdinterface xcatmaster current_osimage next_osimage nimserver routenames nameservers comments disable)],
cols => [qw(node servicenode netboot tftpserver tftpdir nfsserver monserver nfsdir installnic primarynic discoverynics cmdinterface xcatmaster current_osimage next_osimage nimserver routenames nameservers proxydhcp comments disable)],
keys => [qw(node)],
tablespace =>'XCATTBS16K',
table_desc => 'Resources and settings to use when installing nodes.',
@@ -614,6 +615,7 @@ noderes => {
nimserver => 'Not used for now. The NIM server for this node (as known by this node).',
routenames => 'A comma separated list of route names that refer to rows in the routes table. These are the routes that should be defined on this node when it is deployed.',
nameservers => 'An optional node/group specific override for name server list. Most people want to stick to site or network defined nameserver configuration.',
proxydhcp => 'To specify whether the node supports proxydhcp protocol. Valid values: yes or 1, no or 0. Default value is yes.',
comments => 'Any user-written notes.',
disable => "Set to 'yes' or '1' to comment out this row.",
},
@@ -731,6 +733,21 @@ linuximage => {
disable => "Set to 'yes' or '1' to comment out this row.",
},
},
winimage => {
cols => [qw(imagename template installto partitionfile winpepath comments disable)],
keys => [qw(imagename)],
tablespace =>'XCATTBS32K',
table_desc => 'Information about a Windows operating system image that can be used to deploy cluster nodes.',
descriptions => {
imagename => 'The name of this xCAT OS image definition.',
template => 'The fully qualified name of the template file that is used to create the windows unattend.xml file for diskful installation.',
installto => 'The disk and partition that the Windows will be deployed to. The valid format is <disk>:<partition>. If not set, default value is 0:1 for bios boot mode(legacy) and 0:3 for uefi boot mode; If setting to 1, it means 1:1 for bios boot and 1:3 for uefi boot',
partitionfile => 'The path of partition configuration file. Since the partition configuration for bios boot mode and uefi boot mode are different, this configuration file should include two parts if customer wants to support both bios and uefi mode. If customer just wants to support one of the modes, specify one of them anyway. Example of partition configuration file: [BIOS]xxxxxxx[UEFI]yyyyyyy. To simplify the setting, you also can set installto in partitionfile with section likes [INSTALLTO]0:1',
winpepath => 'The path of winpe which will be used to boot this image. If the real path is /tftpboot/winboot/winpe1/, the value for winpepath should be set to winboot/winpe1',
comments => 'Any user-written notes.',
disable => "Set to 'yes' or '1' to comment out this row.",
}
},
passwd => {
cols => [qw(key username password cryptmethod authdomain comments disable)],
keys => [qw(key username)],
@@ -820,13 +837,13 @@ ppchcp => {
},
},
servicenode => {
cols => [qw(node nameserver dhcpserver tftpserver nfsserver conserver monserver ldapserver ntpserver ftpserver nimserver ipforward dhcpinterfaces comments disable)],
cols => [qw(node nameserver dhcpserver tftpserver nfsserver conserver monserver ldapserver ntpserver ftpserver nimserver ipforward dhcpinterfaces proxydhcp comments disable)],
keys => [qw(node)],
tablespace =>'XCATTBS16K',
table_desc => 'List of all Service Nodes and services that will be set up on the Service Node.',
descriptions => {
node => 'The hostname of the service node as known by the Management Node.',
nameserver => 'Do we set up DNS on this service node? Valid values:yes or 1, no or 0. If yes, creates named.conf file with forwarding to the management node and starts named. If no or 0, it does not change the current state of the service. ',
nameserver => 'Do we set up DNS on this service node? Valid values: 2, 1, no or 0. If 2, creates named.conf as dns slave, using the management node as dns master, and starts named. If 1, creates named.conf file with forwarding to the management node and starts named. If no or 0, it does not change the current state of the service. ',
dhcpserver => 'Do we set up DHCP on this service node? Not supported on AIX. Valid values:yes or 1, no or 0. If yes, runs makedhcp -n. If no or 0, it does not change the current state of the service. ',
tftpserver => 'Do we set up TFTP on this service node? Not supported on AIX. Valid values:yes or 1, no or 0. If yes, configures and starts atftp. If no or 0, it does not change the current state of the service. ',
nfsserver => 'Do we set up file services (HTTP,FTP,or NFS) on this service node? For AIX will only setup NFS, not HTTP or FTP. Valid values:yes or 1, no or 0.If no or 0, it does not change the current state of the service. ',
@@ -838,6 +855,7 @@ servicenode => {
nimserver => 'Not used. Do we set up a NIM server on this service node? Valid values:yes or 1, no or 0. If no or 0, it does not change the current state of the service.',
ipforward => 'Do we set up ip forwarding on this service node? Valid values:yes or 1, no or 0. If no or 0, it does not change the current state of the service.',
dhcpinterfaces => 'The network interfaces DHCP server should listen on for the target node. This attribute can be used for management node and service nodes. If defined, it will override the values defined in site.dhcpinterfaces. This is a comma separated list of device names. !remote! indicates a non-local network for relay DHCP. For example: !remote!,eth0,eth1',
proxydhcp => 'Do we set up proxydhcp service on this node? valid values: yes or 1, no or 0. If yes, the proxydhcp daemon will be enabled on this node.',
comments => 'Any user-written notes.',
disable => "Set to 'yes' or '1' to comment out this row.",
@@ -846,35 +864,44 @@ servicenode => {
site => {
cols => [qw(key value comments disable)],
keys => [qw(key)],
table_desc => "Global settings for the whole cluster. This table is different from the \nother tables in that each attribute is just named in the key column, rather \nthan having a separate column for each attribute. The following is a list of \nthe attributes currently used by xCAT.\n",
table_desc => "Global settings for the whole cluster. This table is different from the \nother tables in that each attribute is just named in the key column, rather \nthan having a separate column for each attribute. The following is a list of \nattributes currently used by xCAT organized into categories.\n",
descriptions => {
# Do not put description text past column 88, so it displays well in a 100 char wide window.
# ----------------------------------------------------------------------------------|----------
key => "Attribute Name: Description\n\n".
" auditskipcmds: List of commands and/or client types that will not be written to the auditlog table.\n".
" ------------\n".
"AIX ATTRIBUTES\n".
" ------------\n".
" nimprime : The name of NIM server, if not set default is the AIX MN.
If Linux MN, then must be set for support of mixed cluster (TBD).\n\n".
" useSSHonAIX: (yes/1 or no/0). Default is yes. The support for rsh/rcp is deprecated.\n".
" useNFSv4onAIX: (yes/1 or no/0). If yes, NFSv4 will be used with NIM. If no,\n".
" NFSv3 will be used with NIM. Default is no.\n\n".
" -----------------\n".
"DATABASE ATTRIBUTES\n".
" -----------------\n".
" auditskipcmds: List of commands and/or client types that will not be\n".
" written to the auditlog table.\n".
" 'ALL' means all cmds will be skipped. If attribute is null, all\n".
" commands will be written.\n".
" clienttype:web would skip all commands from the web client\n".
" For example: tabdump,nodels,clienttype:web \n".
" will not log tabdump,nodels and any web client commands.\n\n".
" blademaxp: The maximum number of concurrent processes for blade hardware control.\n\n".
" cleanupxcatpost: (yes/1 or no/0). Set to 'yes' or '1' to clean up the /xcatpost\n".
" directory on the stateless and statelite nodes after the\n".
" postscripts are run. Default is no.\n\n".
" consoleondemand: When set to 'yes', conserver connects and creates the console\n".
" output only when the user opens the console. Default is no on\n".
" Linux, yes on AIX.\n\n".
" databaseloc: Directory where we create the db instance directory.\n".
" Default is /var/lib. Only DB2 is currently supported.\n".
" Do not use the directory in the site.installloc or\n".
" installdir attribute. This attribute must not be changed\n".
" once db2sqlsetup script has been run and DB2 has been setup.\n\n".
" db2installloc: The location which the service nodes should mount for\n".
" the db2 code to install. Format is hostname:/path. If hostname is\n".
" omitted, it defaults to the management node. Default is /mntdb2.\n\n".
" defserialflow: The default serial flow - currently only used by the mknb command.\n\n".
" defserialport: The default serial port - currently only used by mknb.\n\n".
" defserialspeed: The default serial speed - currently only used by mknb.\n\n".
" excludenodes: A set of comma separated nodes and/or groups that would automatically\n".
" be subtracted from any noderange, it can be used for excluding some\n".
" failed nodes for any xCAT commands. See the 'noderange' manpage for\n".
" details on supported formats.\n\n".
" nodestatus: If set to 'n', the nodelist.status column will not be updated during\n".
" the node deployment, node discovery and power operations. The default is to update.\n\n".
" skiptables: Comma separated list of tables to be skipped by dumpxCATdb\n\n".
" -------------\n".
"DHCP ATTRIBUTES\n".
" -------------\n".
" dhcpinterfaces: The network interfaces DHCP should listen on. If it is the same\n".
" for all nodes, use a simple comma-separated list of NICs. To\n".
" specify different NICs for different nodes:\n".
@@ -887,59 +914,19 @@ site => {
" disjointdhcps: If set to '1', the .leases file on a service node only contains\n".
" the nodes it manages. The default value is '0'.\n".
" '0' value means include all the nodes in the subnet.\n\n".
" pruneservices: Whether to enable service pruning when noderm is run (i.e.\n".
" removing DHCP entries when noderm is executed)\n\n".
" ------------\n".
"DNS ATTRIBUTES\n".
" ------------\n".
" dnshandler: Name of plugin that handles DNS setup for makedns.\n".
" domain: The DNS domain name used for the cluster.\n\n".
" ea_primary_hmc: The hostname of the HMC that the Integrated Switch Network\n".
" Management Event Analysis should send hardware serviceable\n".
" events to for processing and potentially sending to IBM.\n\n".
" ea_backup_hmc: The hostname of the HMC that the Integrated Switch Network\n".
" Management Event Analysis should send hardware serviceable\n".
" events to if the primary HMC is down.\n\n".
" enableASMI: (yes/1 or no/0). If yes, ASMI method will be used after fsp-api. If no,\n".
" when fsp-api is used, ASMI method will not be used. Default is no.\n\n".
" excludenodes: A set of comma separated nodes and/or groups that would automatically\n".
" be subtracted from any noderange, it can be used for excluding some\n".
" failed nodes for any xCAT commands. See the 'noderange' manpage for\n".
" details on supported formats.\n\n".
" forwarders: The DNS servers at your site that can provide names outside of the\n".
" cluster. The makedns command will configure the DNS on the management\n".
" node to forward requests it does not know to these servers.\n".
" Note that the DNS servers on the service nodes will ignore this value\n".
" and always be configured to forward requests to the management node.\n\n".
" fsptimeout: The timeout, in milliseconds, to use when communicating with FSPs.\n\n".
" genmacprefix: When generating mac addresses automatically, use this manufacturing\n".
" prefix (e.g. 00:11:aa)\n\n".
" genpasswords: Automatically generate random passwords for BMCs when configuring\n".
" them.\n\n".
" httpport: The port number that the booting/installing nodes should contact the\n".
" http server on the MN/SN on. It is your responsibility to configure\n".
" the http server to listen on that port - xCAT will not do that.\n\n".
" installdir: The local directory name used to hold the node deployment packages.\n\n".
" installloc: The location from which the service nodes should mount the \n".
" deployment packages in the format hostname:/path. If hostname is\n".
" omitted, it defaults to the management node. The path must\n".
" match the path in the installdir attribute.\n\n".
" ipmidispatch: Whether or not to send ipmi hw control operations to the service\n".
" node of the target compute nodes. Default is 'y'.\n\n".
" hwctrldispatch: Whether or not to send hw control operations to the service\n".
" node of the target nodes. Default is 'y'.(At present, this attribute\n".
" is only used for IBM Flex System)\n\n".
" ipmimaxp: The max # of processes for ipmi hw ctrl. The default is 64. Currently,\n".
" this is only used for HP hw control.\n\n".
" ipmiretries: The # of retries to use when communicating with BMCs. Default is 3.\n\n".
" ipmisdrcache: If set to 'no', then the xCAT IPMI support will not cache locally\n".
" the target node's SDR cache to improve performance.\n\n".
" ipmitimeout: The timeout to use when communicating with BMCs. Default is 2.\n".
" This attribute is currently not used.\n\n".
" iscsidir: The path to put the iscsi disks in on the mgmt node.\n\n".
" master: The hostname of the xCAT management node, as known by the nodes.\n\n".
" maxssh: The max # of SSH connections at any one time to the hw ctrl point for PPC\n".
" This parameter doesn't take effect on the rpower command.\n".
" It takes effects on other PPC hardware control command\n".
" getmacs/rnetboot/rbootseq and so on. Default is 8.\n\n".
" mnroutenames: The name of the routes to be setup on the management node.\n".
" It is a comma separated list of route names that are defined in the\n".
" routes table.\n\n".
" nameservers: A comma delimited list of DNS servers that each node in the cluster\n".
" should use. This value will end up in the nameserver settings of the\n".
" /etc/resolv.conf on each node. It is common (but not required) to set\n".
@@ -949,18 +936,35 @@ site => {
" \"<xcatmaster>\" to mean the DNS server for each node should be the\n".
" node that is managing it (either its service node or the management\n".
" node).\n\n".
" nimprime : The name of NIM server, if not set default is the AIX MN.
If Linux MN, then must be set for support of mixed cluster (TBD).\n\n".
" nodestatus: If set to 'n', the nodelist.status column will not be updated during\n".
" the node deployment, node discovery and power operations. The default is to update.\n\n".
" ntpservers: A comma delimited list of NTP servers for the cluster - often the\n".
" xCAT management node.\n\n".
" runbootscripts: If set to 'yes' the scripts listed in the postbootscripts\n".
" attribute in the osimage and postscripts tables will be run during\n".
" each reboot of stateful (diskful) nodes. This attribute has no\n".
" effect on stateless and statelite nodes. Please run the following\n" .
" command after you change the value of this attribute: \n".
" 'updatenode <nodes> -P setuppostbootscripts'\n\n".
" -------------------------\n".
"HARDWARE CONTROL ATTRIBUTES\n".
" -------------------------\n".
" blademaxp: The maximum number of concurrent processes for blade hardware control.\n\n".
" ea_primary_hmc: The hostname of the HMC that the Integrated Switch Network\n".
" Management Event Analysis should send hardware serviceable\n".
" events to for processing and potentially sending to IBM.\n\n".
" ea_backup_hmc: The hostname of the HMC that the Integrated Switch Network\n".
" Management Event Analysis should send hardware serviceable\n".
" events to if the primary HMC is down.\n\n".
" enableASMI: (yes/1 or no/0). If yes, ASMI method will be used after fsp-api. If no,\n".
" when fsp-api is used, ASMI method will not be used. Default is no.\n\n".
" fsptimeout: The timeout, in milliseconds, to use when communicating with FSPs.\n\n".
" hwctrldispatch: Whether or not to send hw control operations to the service\n".
" node of the target nodes. Default is 'y'.(At present, this attribute\n".
" is only used for IBM Flex System)\n\n".
" ipmidispatch: Whether or not to send ipmi hw control operations to the service\n".
" node of the target compute nodes. Default is 'y'.\n\n".
" ipmimaxp: The max # of processes for ipmi hw ctrl. The default is 64. Currently,\n".
" this is only used for HP hw control.\n\n".
" ipmiretries: The # of retries to use when communicating with BMCs. Default is 3.\n\n".
" ipmisdrcache: If set to 'no', then the xCAT IPMI support will not cache locally\n".
" the target node's SDR cache to improve performance.\n\n".
" ipmitimeout: The timeout to use when communicating with BMCs. Default is 2.\n".
" This attribute is currently not used.\n\n".
" maxssh: The max # of SSH connections at any one time to the hw ctrl point for PPC\n".
" This parameter doesn't take effect on the rpower command.\n".
" It takes effects on other PPC hardware control command\n".
" getmacs/rnetboot/rbootseq and so on. Default is 8.\n\n".
" syspowerinterval: For system p CECs, this is the number of seconds the rpower\n".
" command will wait between performing the action for each CEC.\n".
" For system x IPMI servers, this is the number of seconds the\n".
@@ -987,15 +991,45 @@ site => {
" ppctimeout: The timeout, in milliseconds, to use when communicating with PPC hw\n".
" through HMC. It only takes effect on the hardware control commands\n".
" through HMC. Default is 0.\n\n".
" snmpc: The snmp community string that xcat should use when communicating with the\n".
" switches.\n\n".
" ---------------------------\n".
"INSTALL/DEPLOYMENT ATTRIBUTES\n".
" ---------------------------\n".
" cleanupxcatpost: (yes/1 or no/0). Set to 'yes' or '1' to clean up the /xcatpost\n".
" directory on the stateless and statelite nodes after the\n".
" postscripts are run. Default is no.\n\n".
" db2installloc: The location which the service nodes should mount for\n".
" the db2 code to install. Format is hostname:/path. If hostname is\n".
" omitted, it defaults to the management node. Default is /mntdb2.\n\n".
" defserialflow: The default serial flow - currently only used by the mknb command.\n\n".
" defserialport: The default serial port - currently only used by mknb.\n\n".
" defserialspeed: The default serial speed - currently only used by mknb.\n\n".
" genmacprefix: When generating mac addresses automatically, use this manufacturing\n".
" prefix (e.g. 00:11:aa)\n\n".
" genpasswords: Automatically generate random passwords for BMCs when configuring\n".
" them.\n\n".
" installdir: The local directory name used to hold the node deployment packages.\n\n".
" installloc: The location from which the service nodes should mount the \n".
" deployment packages in the format hostname:/path. If hostname is\n".
" omitted, it defaults to the management node. The path must\n".
" match the path in the installdir attribute.\n\n".
" iscsidir: The path to put the iscsi disks in on the mgmt node.\n\n".
" mnroutenames: The name of the routes to be setup on the management node.\n".
" It is a comma separated list of route names that are defined in the\n".
" routes table.\n\n".
" runbootscripts: If set to 'yes' the scripts listed in the postbootscripts\n".
" attribute in the osimage and postscripts tables will be run during\n".
" each reboot of stateful (diskful) nodes. This attribute has no\n".
" effect on stateless and statelite nodes. Please run the following\n" .
" command after you change the value of this attribute: \n".
" 'updatenode <nodes> -P setuppostbootscripts'\n\n".
" precreatemypostscripts: (yes/1 or no/0). Default is no. If yes, it will \n".
" instruct xCAT at nodeset and updatenode time to query the db once for\n".
" all of the nodes passed into the cmd and create the mypostscript file\n".
" for each node, and put them in a directory of tftpdir(such as: /tftpboot)\n".
" If no, it will not generate the mypostscript file in the tftpdir.\n\n".
" pruneservices: Whether to enable service pruning when noderm is run (i.e.\n".
" removing DHCP entries when noderm is executed)\n\n".
" rsh: This is no longer used. path to remote shell command for xdsh.\n\n".
" rcp: This is no longer used. path to remote copy command for xdcp.\n\n".
" setinstallnic: Set the network configuration for installnic to be static.\n\n".
" sharedtftp: Set to 0 or no, xCAT should not assume the directory\n".
" in tftpdir is mounted on all on Service Nodes. Default is 1/yes.\n".
" If value is set to a hostname, the directory in tftpdir\n".
@@ -1006,18 +1040,30 @@ site => {
" shared filesystem is being used across all service nodes.\n".
" 'all' means that the management as well as the service nodes\n".
" are all using a common shared filesystem. The default is 'no'.\n".
" skiptables: Comma separated list of tables to be skipped by dumpxCATdb\n".
" xcatconfdir: Where xCAT config data is (default /etc/xcat).\n\n".
" --------------------\n".
"REMOTESHELL ATTRIBUTES\n".
" --------------------\n".
" nodesyncfiledir: The directory on the node, where xdcp will rsync the files\n".
" SNsyncfiledir: The directory on the Service Node, where xdcp will rsync the files\n".
" from the MN that will eventually be rsync'd to the compute nodes.\n\n".
" nodesyncfiledir: The directory on the node, where xdcp will rsync the files\n".
" snmpc: The snmp community string that xcat should use when communicating with the\n".
" switches.\n\n".
" sshbetweennodes: Comma separated list of groups to enable passwordless root \n".
" sshbetweennodes: Comma separated list of groups of compute nodes to enable passwordless root \n".
" ssh during install, or xdsh -K. Default is ALLGROUPS.\n".
" Set to NOGROUPS,if you do not wish to enabled any groups.\n".
" Set to NOGROUPS,if you do not wish to enabled any group of compute nodes.\n".
" Service Nodes are not affected by this attribute\n".
" they are always setup with\n".
" passwordless root access to nodes and other SN.\n\n".
" -----------------\n".
"SERVICES ATTRIBUTES\n".
" -----------------\n".
" consoleondemand: When set to 'yes', conserver connects and creates the console\n".
" output only when the user opens the console. Default is no on\n".
" Linux, yes on AIX.\n\n".
" httpport: The port number that the booting/installing nodes should contact the\n".
" http server on the MN/SN on. It is your responsibility to configure\n".
" the http server to listen on that port - xCAT will not do that.\n\n".
" ntpservers: A comma delimited list of NTP servers for the cluster - often the\n".
" xCAT management node.\n\n".
" svloglocal: if set to 1, syslog on the service node will not get forwarded to the\n".
" mgmt node.\n\n".
" timezone: (e.g. America/New_York)\n\n".
@@ -1027,11 +1073,26 @@ site => {
" useNmapfromMN: When set to yes, nodestat command should obtain the node status\n".
" using nmap (if available) from the management node instead of the\n".
" service node. This will improve the performance in a flat network.\n\n".
" useSSHonAIX: (yes/1 or no/0). If yes, ssh/scp will be setup and used. If no, rsh/rcp. The support for rsh/rcp is deprecated.\n".
" vsftp: Default is 'n'. If set to 'y', the xcatd on the mn will automatically\n".
" bring up vsftpd. (You must manually install vsftpd before this.\n".
" This setting does not apply to the service node. For sn\n".
" you need to set servicenode.ftpserver=1 if you want xcatd to\n".
" bring up vsftpd.\n\n".
" -----------------------\n".
"VIRTUALIZATION ATTRIBUTES\n".
" -----------------------\n".
" usexhrm: Have xCAT run its xHRM script when booting up KVM guests to set the\n".
" virtual network bridge up correctly. See\n".
" https://sourceforge.net/apps/mediawiki/xcat/index.php?title=XCAT_Virtualization_with_KVM#Setting_up_a_network_bridge\n\n".
" rsh/rcp will be setup and used on AIX. Default is yes.\n\n".
" vcenterautojoin: When set to no, the VMWare plugin will not attempt to auto remove\n".
" and add hypervisors while trying to perform operations. If users\n".
" or tasks outside of xCAT perform the joining this assures xCAT\n".
" will not interfere.\n\n".
" vmwarereconfigonpower: When set to no, the VMWare plugin will make no effort to\n".
" push vm.cpus/vm.memory updates from xCAT to VMWare.\n\n".
" --------------------\n".
"XCAT DAEMON ATTRIBUTES\n".
" --------------------\n".
" useflowcontrol: (yes/1 or no/0). If yes, the postscript processing on each node\n".
" contacts xcatd on the MN/SN using a lightweight UDP packet to wait\n".
" until xcatd is ready to handle the requests associated with\n".
@@ -1042,28 +1103,14 @@ site => {
" xcatd, and retry. On a new install of xcat, this value will be set to yes.\n".
" See the following document for details:\n".
" https://sourceforge.net/apps/mediawiki/xcat/index.php?title=Hints_and_Tips_for_Large_Scale_Clusters\n\n".
" useNFSv4onAIX: (yes/1 or no/0). If yes, NFSv4 will be used with NIM. If no,\n".
" NFSv3 will be used with NIM. Default is no.\n\n".
" vcenterautojoin: When set to no, the VMWare plugin will not attempt to auto remove\n".
" and add hypervisors while trying to perform operations. If users\n".
" or tasks outside of xCAT perform the joining this assures xCAT\n".
" will not interfere.\n\n".
" vmwarereconfigonpower: When set to no, the VMWare plugin will make no effort to\n".
" push vm.cpus/vm.memory updates from xCAT to VMWare.\n\n".
" vsftp: Default is 'n'. If set to 'y', the xcatd on the mn will automatically\n".
" bring up vsftpd. (You must manually install vsftpd before this.\n".
" This setting does not apply to the service node. For sn\n".
" you need to set servicenode.ftpserver=1 if you want xcatd to\n".
" bring up vsftpd.\n\n".
" xcatconfdir: Where xCAT config data is (default /etc/xcat).\n\n".
" xcatmaxconnections: Number of concurrent xCAT protocol requests before requests\n".
" begin queueing. This applies to both client command requests\n".
" and node requests, e.g. to get postscripts. Default is 64.\n\n".
" xcatmaxbatchconnections: Number of concurrent xCAT connections allowed from the nodes.\n".
" Value must be less than xcatmaxconnections. Default is 50.\n\n".
" xcatdport: The port used by the xcatd daemon for client/server communication.\n\n".
" xcatiport: The port used by xcatd to receive install status updates from nodes.\n\n",
" xcatsslversion: The ssl version by xcatd. Default is SSLv3.\n\n",
" xcatiport: The port used by xcatd to receive install status updates from nodes.\n\n".
" xcatsslversion: The ssl version by xcatd. Default is SSLv3.\n\n".
" xcatsslciphers: The ssl cipher by xcatd. Default is 3DES.\n\n",
value => 'The value of the attribute specified in the "key" column.',
comments => 'Any user-written notes.',
@@ -1143,6 +1190,18 @@ performance => {
disable => "Set to 'yes' or '1' to comment out this row.",
},
},
# Table: zone -- defines cluster zones.  Nodes assigned to a zone share a
# set of root ssh keys, giving passwordless root ssh only within the zone.
zone => {
cols => [qw(zonename sshkeydir defaultzone comments disable)],
keys => [qw(zonename)],
table_desc => 'Defines a cluster zone for nodes that share root ssh key access to each other.',
descriptions => {
zonename => 'The name of the zone.',
sshkeydir => 'Directory containing the shared root ssh RSA keys.',
# Fixed garbled sentence fragment in the original description.
defaultzone => 'If set to yes or 1, nodes that are not assigned to any other zone will default to this zone.',
comments => 'Any user-provided notes.',
disable => "Set to 'yes' or '1' to comment out this row.",
},
},
eventlog => {
cols => [qw(recid eventtime eventtype monitor monnode node application component id severity message rawdata comments disable)],
@@ -1279,7 +1338,7 @@ firmware => {
},
nics => {
cols => [qw(node nicips nichostnamesuffixes nictypes niccustomscripts nicnetworks nicaliases comments disable)],
cols => [qw(node nicips nichostnamesuffixes nichostnameprefixes nictypes niccustomscripts nicnetworks nicaliases comments disable)],
keys => [qw(node)],
tablespace =>'XCATTBS16K',
table_desc => 'Stores NIC details.',
@@ -1297,6 +1356,13 @@ nics => {
<nic1>!<ext1>|<ext2>,<nic2>!<ext1>|<ext2>,..., for example, eth0!-eth0|-eth0-ipv6,ib0!-ib0|-ib0-ipv6.
The xCAT object definition commands support to use nichostnamesuffixes.<nicname> as the sub attributes.
Note: According to DNS rules a hostname must be a text string up to 24 characters drawn from the alphabet (A-Z), digits (0-9), minus sign (-),and period (.). When you are specifying "nichostnamesuffixes" or "nicaliases" make sure the resulting hostnames will conform to this naming convention',
# User-facing description of nics.nichostnameprefixes; typo and grammar fixes
# ("associcated" -> "associated", stray "ipv6i-" example, comma spacing).
nichostnameprefixes => 'Comma-separated list of hostname prefixes per NIC.
If only one ip address is associated with each NIC:
<nic1>!<ext1>,<nic2>!<ext2>,..., for example, eth0!eth0-,ib0!ib-
If multiple ip addresses are associated with each NIC:
<nic1>!<ext1>|<ext2>,<nic2>!<ext1>|<ext2>,..., for example, eth0!eth0-|eth0-ipv6-,ib0!ib-|ib-ipv6-.
The xCAT object definition commands support using nichostnameprefixes.<nicname> as the sub attributes.
Note: According to DNS rules a hostname must be a text string up to 24 characters drawn from the alphabet (A-Z), digits (0-9), minus sign (-), and period (.). When you are specifying "nichostnameprefixes" or "nicaliases" make sure the resulting hostnames will conform to this naming convention',
nictypes => 'Comma-separated list of NIC types per NIC. <nic1>!<type1>,<nic2>!<type2>, e.g. eth0!Ethernet,ib0!Infiniband. The xCAT object definition commands support to use nictypes.<nicname> as the sub attributes.',
niccustomscripts => 'Comma-separated list of custom scripts per NIC. <nic1>!<script1>,<nic2>!<script2>, e.g. eth0!configeth eth0, ib0!configib ib0. The xCAT object definition commands support to use niccustomscripts.<nicname> as the sub attribute
.',
@@ -1481,8 +1547,20 @@ mic => {
disable => "Do not use. tabprune will not work if set to yes or 1",
},
},
# Table: hwinv -- hardware inventory (cpu/memory/disk) recorded per node or
# node group; surfaced through the node object definition commands.
hwinv => {
cols => [qw(node cputype cpucount memory disksize comments disable)],
# Primary key: one row per node (or group) name.
keys => [qw(node)],
table_desc => 'The hardware inventory for the node.',
descriptions => {
node => 'The node name or group name.',
cputype => 'The cpu model name for the node.',
cpucount => 'The number of cpus for the node.',
memory => 'The size of the memory for the node.',
disksize => 'The size of the disks for the node.',
comments => 'Any user-provided notes.',
disable => "Set to 'yes' or '1' to comment out this row.",
},
},
); # end of tabspec definition
@@ -1560,6 +1638,7 @@ foreach my $tabname (keys(%xCAT::ExtTab::ext_tabspec)) {
rack => { attrs => [], attrhash => {}, objkey => 'rackname' },
osdistro=> { attrs => [], attrhash => {}, objkey => 'osdistroname' },
osdistroupdate=> { attrs => [], attrhash => {}, objkey => 'osupdatename' },
zone=> { attrs => [], attrhash => {}, objkey => 'zonename' },
);
@@ -1628,6 +1707,11 @@ my @nodeattrs = (
tabentry => 'noderes.monserver',
access_tabentry => 'noderes.node=attr:node',
},
{attr_name => 'supportproxydhcp',
tabentry => 'noderes.proxydhcp',
access_tabentry => 'noderes.node=attr:node',
},
{attr_name => 'kernel',
tabentry => 'bootparams.kernel',
access_tabentry => 'bootparams.node=attr:node',
@@ -1696,6 +1780,10 @@ my @nodeattrs = (
{attr_name => 'setupipforward',
tabentry => 'servicenode.ipforward',
access_tabentry => 'servicenode.node=attr:node',
},
{attr_name => 'setupproxydhcp',
tabentry => 'servicenode.proxydhcp',
access_tabentry => 'servicenode.node=attr:node',
},
# - moserver not used yet
# {attr_name => 'setupmonserver',
@@ -2177,6 +2265,10 @@ my @nodeattrs = (
tabentry => 'nics.nichostnamesuffixes',
access_tabentry => 'nics.node=attr:node',
},
{attr_name => 'nichostnameprefixes',
tabentry => 'nics.nichostnameprefixes',
access_tabentry => 'nics.node=attr:node',
},
{attr_name => 'nictypes',
tabentry => 'nics.nictypes',
access_tabentry => 'nics.node=attr:node',
@@ -2452,6 +2544,25 @@ my @nodeattrs = (
tabentry => 'mic.powermgt',
access_tabentry => 'mic.node=attr:node',
},
#####################
## hwinv table #
#####################
{attr_name => 'cputype',
tabentry => 'hwinv.cputype',
access_tabentry => 'hwinv.node=attr:node',
},
{attr_name => 'cpucount',
tabentry => 'hwinv.cpucount',
access_tabentry => 'hwinv.node=attr:node',
},
{attr_name => 'memory',
tabentry => 'hwinv.memory',
access_tabentry => 'hwinv.node=attr:node',
},
{attr_name => 'disksize',
tabentry => 'hwinv.disksize',
access_tabentry => 'hwinv.node=attr:node',
},
); # end of @nodeattrs that applies to both nodes and groups
@@ -2502,6 +2613,10 @@ my @nodeattrs = (
{attr_name => 'updatestatustime',
tabentry => 'nodelist.updatestatustime',
access_tabentry => 'nodelist.node=attr:node',
},
{attr_name => 'zonename',
tabentry => 'nodelist.zonename',
access_tabentry => 'nodelist.node=attr:node',
},
{attr_name => 'usercomment',
tabentry => 'nodelist.comments',
@@ -2707,6 +2822,29 @@ push(@{$defspec{node}->{'attrs'}}, @nodeattrs);
access_tabentry => 'linuximage.imagename=attr:imagename',
},
####################
# winimage table#
####################
{attr_name => 'template',
only_if => 'imagetype=windows',
tabentry => 'winimage.template',
access_tabentry => 'winimage.imagename=attr:imagename',
},
{attr_name => 'installto',
only_if => 'imagetype=windows',
tabentry => 'winimage.installto',
access_tabentry => 'winimage.imagename=attr:imagename',
},
{attr_name => 'partitionfile',
only_if => 'imagetype=windows',
tabentry => 'winimage.partitionfile',
access_tabentry => 'winimage.imagename=attr:imagename',
},
{attr_name => 'winpepath',
only_if => 'imagetype=windows',
tabentry => 'winimage.winpepath',
access_tabentry => 'winimage.imagename=attr:imagename',
},
####################
# nimimage table#
####################
{attr_name => 'nimtype',
@@ -2933,6 +3071,32 @@ push(@{$defspec{node}->{'attrs'}}, @nodeattrs);
access_tabentry => 'rack.rackname=attr:rackname',
},
);
####################
# zone table #
####################
# Maps the 'zone' object definition attributes (used by mkdef/chdef/lsdef)
# onto columns of the zone table.  Each entry ties an attribute name to its
# backing table column (tabentry) and the row-lookup rule (access_tabentry),
# keyed by the zone object's zonename.
@{$defspec{zone}->{'attrs'}} = (
{attr_name => 'zonename',
tabentry => 'zone.zonename',
access_tabentry => 'zone.zonename=attr:zonename',
},
{attr_name => 'sshkeydir',
tabentry => 'zone.sshkeydir',
access_tabentry => 'zone.zonename=attr:zonename',
},
{attr_name => 'defaultzone',
tabentry => 'zone.defaultzone',
access_tabentry => 'zone.zonename=attr:zonename',
},
# 'usercomment' is the conventional object attribute name for the
# table's comments column.
{attr_name => 'usercomment',
tabentry => 'zone.comments',
access_tabentry => 'zone.zonename=attr:zonename',
},
);
#########################
# route data object #
#########################
# routes table #
#########################
#########################
# route data object #
#########################

2
perl-xCAT/xCAT/ServiceNodeUtils.pm Normal file → Executable file
View File

@@ -163,6 +163,8 @@ sub isServiceReq
if (($value eq "1") || ($value eq "YES"))
{
$servicehash->{$service} = "1";
} elsif ($value eq "2") {
$servicehash->{$service} = "2";
} else {
$servicehash->{$service} = "0";
}

View File

@@ -207,6 +207,7 @@ my %usage = (
mkvm noderange [--full]
mkvm noderange [vmcpus=min/req/max] [vmmemory=min/req/max]
[vmphyslots=drc_index1,drc_index2...] [vmothersetting=hugepage:N,bsr:N]
[vmnics=vlan1,vlan2] [vmstorage=<N|viosnode:slotid>] [--vios]
For KVM
mkvm noderange -m|--master mastername -s|--size disksize -f|--force
For zVM
@@ -241,6 +242,8 @@ my %usage = (
chvm <noderange> [lparname=<*|name>]
chvm <noderange> [vmcpus=min/req/max] [vmmemory=min/req/max]
[vmphyslots=drc_index1,drc_index2...] [vmothersetting=hugepage:N,bsr:N]
[vmnics=vlan1,vlan2] [vmstorage=<N|viosnode:slotid>] [--vios]
chvm <noderange> [del_vadapter=slotid]
VMware specific:
chvm <noderange> [-a size][-d disk][-p disk][--resize disk=size][--cpus count][--mem memory]
zVM specific:
@@ -277,7 +280,7 @@ my %usage = (
"lsslp" =>
"Usage: lsslp [-h|--help|-v|--version]
lsslp [<noderange>][-V|--verbose][-i ip[,ip..]][-w][-r|-x|-z][-n][-I][-s FRAME|CEC|MM|IVM|RSA|HMC|CMM|IMM2|FSP]
[-t tries][--vpdtable][-C counts][-T timeout]",
[-u] [--range IPranges][-t tries][--vpdtable][-C counts][-T timeout]",
"rflash" =>
"Usage:
rflash [ -h|--help|-v|--version]

View File

@@ -940,18 +940,15 @@ sub runcmd
my ($class, $cmd, $exitcode, $refoutput, $stream) = @_;
$::RUNCMD_RC = 0;
# redirect stderr to stdout
if (!($cmd =~ /2>&1$/)) { $cmd .= ' 2>&1'; }
my $hostname = `/bin/hostname`;
chomp $hostname;
if ($::VERBOSE)
{
# get this systems name as known by xCAT management node
my $Sname = xCAT::InstUtils->myxCATname();
my $msg;
if ($Sname) {
$msg = "Running command on $Sname: $cmd";
} else {
$msg="Running command: $cmd";
}
if ($::VERBOSE)
{
my $msg="Running command on $hostname: $cmd";
if ($::CALLBACK){
my $rsp = {};
@@ -960,7 +957,7 @@ sub runcmd
} else {
xCAT::MsgUtils->message("I", "$msg\n");
}
}
}
my $outref = [];
if (!defined($stream) || (length($stream) == 0)) { # do not stream
@@ -3372,6 +3369,51 @@ sub filter_nostatusupdate{
}
}
}
}
# version_cmp - compare two version strings, RPM-style.
#
# Callable as a plain function or as a class method
# (xCAT::Utils->version_cmp(...)); a leading class-name argument is skipped.
#
# Each version string is tokenized into separators ('-' or '.'), runs of
# digits, and runs of any other characters.  Tokens are compared pairwise:
#   * '-' sorts lowest, then '.';
#   * two all-digit tokens compare numerically, unless either carries a
#     leading zero, in which case they compare as strings;
#   * anything else compares case-insensitively as strings.
# If all shared tokens are equal, the string with more tokens is greater.
#
# Returns <0, 0, or >0 like Perl's cmp/<=> operators.
sub version_cmp {
    my $left = shift;
    # Tolerate class-method invocation by discarding the class name.
    if ($left =~ /xCAT::Utils/) {
        $left = shift;
    }
    my $right = shift;

    my @ltoks = ($left  =~ /([-.]|\d+|[^-.\d]+)/g);
    my @rtoks = ($right =~ /([-.]|\d+|[^-.\d]+)/g);

    my $shared = @ltoks < @rtoks ? scalar(@ltoks) : scalar(@rtoks);
    for my $i (0 .. $shared - 1) {
        my $lt = $ltoks[$i];
        my $rt = $rtoks[$i];
        next if $lt eq $rt;
        # Separator ordering: '-' before '.', both before everything else.
        return -1 if $lt eq '-';
        return 1  if $rt eq '-';
        return -1 if $lt eq '.';
        return 1  if $rt eq '.';
        if ($lt =~ /^\d+$/ and $rt =~ /^\d+$/) {
            # A leading zero (e.g. "01" vs "1") forces lexical comparison;
            # otherwise compare the segments as numbers.
            return ($lt =~ /^0/ || $rt =~ /^0/)
                 ? ($lt cmp $rt)
                 : ($lt <=> $rt);
        }
        # Mixed/alphabetic tokens: case-insensitive string comparison.
        return (uc($lt) cmp uc($rt));
    }
    # Every shared token matched: the longer version wins.
    return scalar(@ltoks) <=> scalar(@rtoks);
}
1;

329
perl-xCAT/xCAT/Zone.pm Normal file
View File

@@ -0,0 +1,329 @@
#!/usr/bin/env perl
# IBM(c) 2007 EPL license http://www.eclipse.org/legal/epl-v10.html
package xCAT::Zone;
BEGIN
{
$::XCATROOT = $ENV{'XCATROOT'} ? $ENV{'XCATROOT'} : '/opt/xcat';
}
# if AIX - make sure we include perl 5.8.2 in INC path.
# Needed to find perl dependencies shipped in deps tarball.
if ($^O =~ /^aix/i) {
unshift(@INC, qw(/usr/opt/perl5/lib/5.8.2/aix-thread-multi /usr/opt/perl5/lib/5.8.2 /usr/opt/perl5/lib/site_perl/5.8.2/aix-thread-multi /usr/opt/perl5/lib/site_perl/5.8.2));
}
use lib "$::XCATROOT/lib/perl";
# do not put a use or require for xCAT::Table here. Add to each new routine
# needing it to avoid reprocessing of user tables ( ExtTab.pm) for each command call
use POSIX qw(ceil);
use File::Path;
use Socket;
use strict;
use Symbol;
use warnings "all";
#--------------------------------------------------------------------------------
=head1 xCAT::Zone
=head2 Package Description
This program module file, is a set of Zone utilities used by xCAT *zone commands.
=cut
#--------------------------------------------------------------------------------
=head3 genSSHRootKeys
Arguments:
callback for error messages
directory in which to put the ssh RSA keys
zonename
rsa private key to use for generation ( optional)
Returns:
Error: 1 - key generation failure.
Example:
$rc = xCAT::Zone->genSSHRootKeys($callback, $keydir, $zonename, $rsakey);
=cut
#--------------------------------------------------------------------------------
sub genSSHRootKeys
{
    # Generate (or import) the root ssh RSA key pair for a zone and stage the
    # public key under $installdir/postscripts/_ssh/<zonename> so it can be
    # installed on the zone's nodes.
    # Arguments:
    #   class, callback (for error messages), key directory, zone name,
    #   optional existing RSA private key to reuse instead of generating one
    # Returns:
    #   0 on success, 1 on any failure (original code never returned 0 and
    #   fell through the final error branch without returning 1)
    my ($class, $callback, $keydir, $zonename, $rsakey) = @_;

    # create the key directory if needed
    if (!-d $keydir)
    {
        my $cmd    = "/bin/mkdir -m 700 -p $keydir";
        my $output = xCAT::Utils->runcmd("$cmd", 0);
        if ($::RUNCMD_RC != 0)
        {
            my $rsp = {};
            $rsp->{error}->[0] =
              "Could not create $keydir directory";
            xCAT::MsgUtils->message("E", $rsp, $callback);
            return 1;
        }
    }

    # create /install/postscripts/_ssh/<zonename> if needed
    my $installdir = xCAT::TableUtils->getInstallDir();    # get installdir
    if (!-d "$installdir/postscripts/_ssh/$zonename")
    {
        my $cmd = "/bin/mkdir -m 755 -p $installdir/postscripts/_ssh/$zonename";
        my $output = xCAT::Utils->runcmd("$cmd", 0);
        if ($::RUNCMD_RC != 0)
        {
            my $rsp = {};
            $rsp->{error}->[0] = "Could not create $installdir/postscripts/_ssh/$zonename directory.";
            xCAT::MsgUtils->message("E", $rsp, $callback);
            return 1;
        }
    }

    # need to generate a new RSA key pair for root for the zone
    my $pubfile = "$keydir/id_rsa.pub";
    my $pvtfile = "$keydir/id_rsa";

    # if old keys exist, remove them first
    if (-r $pubfile)
    {
        my $cmd    = "/bin/rm $keydir/id_rsa*";
        my $output = xCAT::Utils->runcmd("$cmd", 0);
        if ($::RUNCMD_RC != 0)
        {
            my $rsp = {};
            $rsp->{error}->[0] = "Could not remove id_rsa files from $keydir directory.";
            xCAT::MsgUtils->message("E", $rsp, $callback);
            return 1;
        }
    }

    my $cmd;
    my $output;
    if (defined($rsakey)) {    # a private key was supplied; derive its public key
        $cmd    = "/usr/bin/ssh-keygen -y -f $rsakey > $pubfile";
        $output = xCAT::Utils->runcmd("$cmd", 0);
        if ($::RUNCMD_RC != 0)
        {
            my $rsp = {};
            $rsp->{error}->[0] = "Could not generate $pubfile from $rsakey";
            xCAT::MsgUtils->message("E", $rsp, $callback);
            return 1;
        }

        # now copy the private key into the zone key directory
        $cmd    = "cp $rsakey $keydir";
        $output = xCAT::Utils->runcmd("$cmd", 0);
        if ($::RUNCMD_RC != 0)
        {
            my $rsp = {};
            $rsp->{error}->[0] = "Could not run $cmd";
            xCAT::MsgUtils->message("E", $rsp, $callback);
            return 1;
        }
    } else {    # generate a brand-new key pair
        $cmd    = "/usr/bin/ssh-keygen -t rsa -q -b 2048 -N '' -f $pvtfile";
        $output = xCAT::Utils->runcmd("$cmd", 0);
        if ($::RUNCMD_RC != 0)
        {
            my $rsp = {};
            $rsp->{error}->[0] = "Could not generate $pubfile";
            xCAT::MsgUtils->message("E", $rsp, $callback);
            return 1;
        }
    }

    # make sure permissions and ownership on the public key are correct
    $cmd    = "chmod 644 $pubfile;chown root $pubfile";
    $output = xCAT::Utils->runcmd("$cmd", 0);
    if ($::RUNCMD_RC != 0)
    {
        my $rsp = {};
        # message fixed: was "Could set permission ..."
        $rsp->{error}->[0] = "Could not set permission and owner on $pubfile";
        xCAT::MsgUtils->message("E", $rsp, $callback);
        return 1;
    }

    # stage the public key so it can be installed as authorized_keys on nodes
    if (-r $pubfile)
    {
        my $cmd =
          "/bin/cp -p $pubfile $installdir/postscripts/_ssh/$zonename ";
        my $output = xCAT::Utils->runcmd("$cmd", 0);
        if ($::RUNCMD_RC != 0)
        {
            my $rsp = {};
            $rsp->{error}->[0] =
              "Could not copy $pubfile to $installdir/postscripts/_ssh/$zonename";
            xCAT::MsgUtils->message("E", $rsp, $callback);
            return 1;
        }
    }
    else
    {
        my $rsp = {};
        $rsp->{error}->[0] =
          "Could not copy $pubfile to $installdir/postscripts/_ssh/$zonename, because $pubfile does not exist.";
        xCAT::MsgUtils->message("E", $rsp, $callback);
        return 1;    # was missing: this error path fell through
    }
    return 0;    # success
}
#--------------------------------------------------------------------------------
=head3 getdefaultzone
Arguments:
None
Returns:
Name of the current default zone from the zone table
Example:
my $defaultzone =xCAT::Zone->getdefaultzone();
=cut
#--------------------------------------------------------------------------------
sub getdefaultzone
{
    # Read the zone table and return the zonename of the entry flagged as the
    # default zone (defaultzone matching "yes" or equal to "1").
    # Returns undef if no default zone is defined or the table cannot be read.
    my ($class, $callback) = @_;
    my $defaultzone;

    # read all the zone table and find the defaultzone, if it exists
    my $tab = xCAT::Table->new("zone");
    if ($tab) {
        my @zones = $tab->getAllAttribs('zonename', 'defaultzone');
        $tab->close();    # fixed: was closed repeatedly inside the loop
        foreach my $zone (@zones) {
            # Look for the defaultzone=yes/1 entry.
            # Fixed: original used "= \"1\"" (assignment, always true), which
            # made the last zone in the table always win as the default.
            if ((defined($zone->{defaultzone}))
                && (($zone->{defaultzone} =~ "yes")
                    || ($zone->{defaultzone} eq "1")))
            {
                $defaultzone = $zone->{zonename};
            }
        }
    } else {
        my $rsp = {};
        $rsp->{error}->[0] =
          "Error reading the zone table. ";
        xCAT::MsgUtils->message("E", $rsp, $callback);
    }
    return $defaultzone;
}
#--------------------------------------------------------------------------------
=head3 iszonedefined
Arguments:
zonename
Returns:
1 if the zone is already in the zone table.
Example:
xCAT::Zone->iszonedefined($zonename);
=cut
#--------------------------------------------------------------------------------
sub iszonedefined
{
    # Check whether the input zonename already has an entry in the zone table.
    # Returns 1 if defined, 0 otherwise (including when the table cannot be
    # opened -- the original would die calling a method on an undef handle).
    my ($class, $zonename) = @_;
    my $tab = xCAT::Table->new("zone");
    return 0 unless ($tab);    # cannot open the zone table; treat as not defined
    my $zone = $tab->getAttribs({zonename => $zonename}, 'sshkeydir');
    $tab->close();
    if (defined($zone)) {
        return 1;
    } else {
        return 0;
    }
}
#--------------------------------------------------------------------------------
=head3 getzoneinfo
Arguments:
An array of nodes
Returns:
Hash array by zonename point to the nodes in that zonename and sshkeydir
zonename1 -> {nodelist} -> array of nodes in the zone
-> {sshkeydir} -> directory containing ssh RSA keys
-> {defaultzone} -> is it the default zone
Example:
my $zonehash = xCAT::Zone->getzoneinfo($callback, $nodes);
Rules:
If the nodes nodelist.zonename attribute is a zonename, it is assigned to that zone
If the nodes nodelist.zonename attribute is undefined:
If there is a defaultzone in the zone table, the node is assigned to that zone
If there is no defaultzone in the zone table, the node is assigned to the ~/.ssh keydir
=cut
#--------------------------------------------------------------------------------
sub getzoneinfo
{
    # Build a hash ref keyed by zonename describing each zone and the input
    # nodes assigned to it:
    #   $zonehash->{$zonename}->{sshkeydir}   - directory of the zone ssh keys
    #   $zonehash->{$zonename}->{defaultzone} - zone table defaultzone value
    #   $zonehash->{$zonename}->{nodes}       - array of nodes in the zone
    # Service nodes and nodes with no zone (and no default zone) go into the
    # synthetic "__xcatzone" whose sshkeydir is ~/.ssh.
    # Returns: the hash ref, or undef if the zone table cannot be read.
    # Fixed: the original ended with a bare "return;" and discarded the
    # entire structure it had just built.
    my ($class, $callback, $nodes) = @_;
    my $zonehash;
    my $defaultzone;

    # read the whole zone table
    my $zonetab = xCAT::Table->new("zone");
    if ($zonetab) {
        my @zones = $zonetab->getAllAttribs('zonename', 'sshkeydir', 'defaultzone');
        $zonetab->close();
        if (@zones) {
            foreach my $zone (@zones) {
                my $zonename = $zone->{zonename};
                $zonehash->{$zonename}->{sshkeydir}   = $zone->{sshkeydir};
                $zonehash->{$zonename}->{defaultzone} = $zone->{defaultzone};
                # find the defaultzone
                # Fixed: original used "= \"1\"" (assignment, always true),
                # which made every zone look like the default.
                if ((defined($zone->{defaultzone}))
                    && (($zone->{defaultzone} =~ "yes")
                        || ($zone->{defaultzone} eq "1")))
                {
                    $defaultzone = $zone->{zonename};
                }
            }
        }
    } else {
        my $rsp = {};
        $rsp->{error}->[0] =
          "Error reading the zone table. ";
        xCAT::MsgUtils->message("E", $rsp, $callback);
        return;
    }

    my $nodelisttab = xCAT::Table->new("nodelist");
    my $nodehash = $nodelisttab->getNodesAttribs(\@$nodes, ['zonename']);

    # Assignment rules:
    #  - a service node always goes to __xcatzone (keys from ~/.ssh)
    #  - a node with nodelist.zonename set goes to that zone
    #  - otherwise the node goes to the default zone if one exists,
    #    else to __xcatzone
    my @allSN = xCAT::ServiceNodeUtils->getAllSN("ALL");    # all service nodes
    my $xcatzone = "__xcatzone";    # zone for service nodes / unzoned nodes
    $zonehash->{$xcatzone}->{sshkeydir} = "~/.ssh";
    foreach my $node (@$nodes) {
        my $zonename;
        if (grep(/^$node$/, @allSN)) {    # service node, treat special
            $zonename = $xcatzone;        # always use the ~/.ssh directory
        } else {                          # use the nodelist.zonename attribute
            $zonename = $nodehash->{$node}->[0]->{zonename};
        }
        if (defined($zonename)) {    # explicitly zoned node
            push @{$zonehash->{$zonename}->{nodes}}, $node;
        } else {                     # no explicit zonename
            if (defined($defaultzone)) {    # use the default zone
                push @{$zonehash->{$defaultzone}->{nodes}}, $node;
            } else {                        # fall back to the ~/.ssh zone
                push @{$zonehash->{$xcatzone}->{nodes}}, $node;
            }
        }
    }
    return $zonehash;    # was "return;" -- all gathered info was lost
}
1;

View File

@@ -49,6 +49,8 @@ require Exporter;
"1328205744.315196" => "rhels5.8", #x86_64
"1354216429.587870" => "rhels5.9", #x86_64
"1354214009.518521" => "rhels5.9", #ppc64
"1378846702.129847" => "rhels5.10", #x86_64
"1378845049.643372" => "rhels5.10", #ppc64
"1285193176.460470" => "rhels6", #x86_64
"1285192093.430930" => "rhels6", #ppc64
"1305068199.328169" => "rhels6.1", #x86_64
@@ -60,6 +62,8 @@ require Exporter;
"1339638991.532890" => "rhels6.3", #i386
"1359576752.435900" => "rhels6.4", #x86_64
"1359576196.686790" => "rhels6.4", #ppc64
"1384196515.415715" => "rhels6.5", #x86_64
"1384198011.520581" => "rhels6.5", #ppc64
"1285193176.593806" => "rhelhpc6", #x86_64
"1305067719.718814" => "rhelhpc6.1",#x86_64
"1321545261.599847" => "rhelhpc6.2",#x86_64
@@ -78,7 +82,7 @@ require Exporter;
"1305315870.828212" => "fedora15", #x86_64 DVD ISO
"1372355769.065812" => "fedora19", #x86_64 DVD ISO
"1372402928.663653" => "fedora19", #ppc64 DVD ISO
"1386856788.124593" => "fedora20", #x86_64 DVD ISO
"1194512200.047708" => "rhas4.6",
"1194512327.501046" => "rhas4.6",
"1241464993.830723" => "rhas4.8", #x86-64

View File

@@ -6,84 +6,275 @@
#include <stdlib.h>
#include <errno.h>
#include <netinet/in.h>
int main() {
int serverfd,port;
int getpktinfo = 1;
struct addrinfo hint, *res;
char cmsg[CMSG_SPACE(sizeof(struct in_pktinfo))];
char clientpacket[1024];
struct sockaddr_in clientaddr;
struct msghdr msg;
struct cmsghdr *cmsgptr;
struct iovec iov[1];
unsigned int myip;
char *txtptr;
iov[0].iov_base = clientpacket;
iov[0].iov_len = 1024;
memset(&msg,0,sizeof(msg));
memset(&clientaddr,0,sizeof(clientaddr));
msg.msg_name=&clientaddr;
msg.msg_namelen = sizeof(clientaddr);
msg.msg_iov = iov;
msg.msg_iovlen = 1;
msg.msg_control=&cmsg;
msg.msg_controllen = sizeof(cmsg);
char bootpmagic[4] = {0x63,0x82,0x53,0x63};
int pktsize;
int doexit=0;
port = 4011;
memset(&hint,0,sizeof(hint));
hint.ai_family = PF_INET; /* Would've done UNSPEC, but it doesn't work right and this is heavily v4 specific anyway */
hint.ai_socktype = SOCK_DGRAM;
hint.ai_flags = AI_PASSIVE;
getaddrinfo(NULL,"4011",&hint,&res);
serverfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
if (!serverfd) { fprintf(stderr,"That's odd...\n"); }
setsockopt(serverfd,IPPROTO_IP,IP_PKTINFO,&getpktinfo,sizeof(getpktinfo));
if (bind(serverfd,res->ai_addr ,res->ai_addrlen) < 0) {
fprintf(stderr,"Unable to bind 4011");
exit(1);
}
while (!doexit) {
pktsize = recvmsg(serverfd,&msg,0);
if (pktsize < 320) {
continue;
}
if (clientpacket[0] != 1 || memcmp(clientpacket+0xec,bootpmagic,4)) {
continue;
}
for (cmsgptr = CMSG_FIRSTHDR(&msg); cmsgptr != NULL; cmsgptr = CMSG_NXTHDR(&msg,cmsgptr)) {
if (cmsgptr->cmsg_level == IPPROTO_IP && cmsgptr->cmsg_type == IP_PKTINFO) {
myip = ((struct in_pktinfo*)(CMSG_DATA(cmsgptr)))->ipi_addr.s_addr;
}
}
clientpacket[0] = 2; //change to a reply
myip = htonl(myip); //endian neutral change
clientpacket[0x14] = (myip>>24)&0xff; //maybe don't need to do this, maybe assigning the whole int would be better
clientpacket[0x15] = (myip>>16)&0xff;
clientpacket[0x16] = (myip>>8)&0xff;
clientpacket[0x17] = (myip)&0xff;
txtptr = clientpacket+0x6c;
strncpy(txtptr,"Boot/bootmgfw.efi",128); // keeping 128 in there just in case someone changes the string
clientpacket[0xf0]=0x35; //DHCP MSG type
clientpacket[0xf1]=0x1; // LEN of 1
clientpacket[0xf2]=0x5; //DHCP ACK
clientpacket[0xf3]=0x36; //DHCP server identifier
clientpacket[0xf4]=0x4; //DHCP server identifier length
clientpacket[0xf5] = (myip>>24)&0xff; //maybe don't need to do this, maybe assigning the whole int would be better
clientpacket[0xf6] = (myip>>16)&0xff;
clientpacket[0xf7] = (myip>>8)&0xff;
clientpacket[0xf8] = (myip)&0xff;
clientpacket[0xf9] = 0xfc; // dhcp 252 'proxy', but coopeted by bootmgfw, it's actually suggesting the boot config file
clientpacket[0xfa] = 9; //length of 9
txtptr = clientpacket+0xfb;
strncpy(txtptr,"Boot/BCD",8);
clientpacket[0x103]=0;
clientpacket[0x104]=0xff;
sendto(serverfd,clientpacket,pktsize,0,(struct sockaddr*)&clientaddr,sizeof(clientaddr));
}
#include <signal.h>
#include <syslog.h>
// the chunk size for each alloc
int chunknum = 200;
int doreload = 0;
int verbose = 0;
char logmsg[50];
// the struct to store the winpe configuration for each node
struct nodecfg {
char node[50];
char data[150];
};
char *data = NULL; // the ptr to the array of all node config
int nodenum = 0;
// trigger the main program to reload configuration file
/* SIGUSR1 handler: merely flags the main loop to re-read the config file.
   All real work happens outside the handler (async-signal-safe). */
void reload(int sig) {
    (void)sig;    /* signal number is not needed */
    doreload = 1;
}
// the subroutine which is used to load configuration from
// /var/lib/xcat/proxydhcp.cfg to *data
// (Re)load /var/lib/xcat/proxydhcp.cfg into the global *data array of
// struct nodecfg records, updating the global nodenum count.
// Fixes vs original: on realloc failure the old buffer was leaked (data was
// already NULL when free() ran) and the FILE* was never closed; nodenum was
// also left partially counted.
void loadcfg () {
    nodenum = 0;
    free(data);
    data = NULL;
    doreload = 0;
    FILE *fp = fopen("/var/lib/xcat/proxydhcp.cfg", "r");
    if (!fp) {
        return;    // no config file: run with the built-in default path
    }
    int num = chunknum;
    int rtime = 1;
    // keep reading while a full chunk was returned (a short read means EOF)
    while (num == chunknum) {
        // grow by one chunk at a time; use a temporary pointer so the old
        // buffer is not lost if realloc fails
        char *grown = realloc(data, sizeof(struct nodecfg) * chunknum * rtime);
        if (grown == NULL) {
            fprintf(stderr, "Cannot get enough memory.\n");
            free(data);
            data = NULL;
            nodenum = 0;
            fclose(fp);
            return;
        }
        data = grown;
        char *dp = data + sizeof(struct nodecfg) * chunknum * (rtime - 1);
        memset(dp, 0, sizeof(struct nodecfg) * chunknum);
        num = fread(dp, sizeof(struct nodecfg), chunknum, fp);
        nodenum += num;
        rtime++;
    }
    fclose(fp);
}
// get the path of winpe from configuration file which is stored in *data
/* Look up a node's winpe path in the loaded config table (global *data,
   nodenum records). Returns the stored path string, or NULL if the node
   has no entry. */
char *getwinpepath(char *node) {
    struct nodecfg *entry = (struct nodecfg *)data;
    struct nodecfg *end = entry + nodenum;
    for (; entry < end; entry++) {
        if (strcmp(entry->node, node) == 0) {
            return entry->data;
        }
    }
    return NULL;
}
// proxyDHCP responder on UDP port 4011: answers PXE client requests with the
// per-node Windows PE bootloader/BCD paths from /var/lib/xcat/proxydhcp.cfg.
// Fixes vs original:
//  - arch could stay NULL when DHCP option 60 is absent, then memcmp(arch,..)
//    dereferenced NULL; such packets are now skipped
//  - sprintf into the 50-byte global logmsg could overflow -> snprintf
//  - syslog(LOG_DEBUG, logmsg) passed data as a format string -> "%s"
//  - winboot/winBCD were 50 bytes but cfg paths can be ~150 -> larger buffers
//    built with snprintf
//  - "reload" message was logged on every 30 s select timeout even when no
//    reload happened
int main(int argc, char *argv[]) {
    int i;
    // -V enables verbose debug logging to syslog
    for (i = 0; i < argc; i++)
    {
        if (strcmp(argv[i], "-V") == 0) {
            verbose = 1;
            setlogmask(LOG_UPTO(LOG_DEBUG));
            openlog("proxydhcp", LOG_NDELAY, LOG_LOCAL0);
        }
    }

    // record my pid in /var/run/xcat/proxydhcp.pid so it can be signalled
    int pid = getpid();
    FILE *pidf = fopen("/var/run/xcat/proxydhcp.pid", "w");
    if (pidf) {
        fprintf(pidf, "%d", pid);
        fclose(pidf);
    } else {
        fprintf(stderr, "Cannot open /var/run/xcat/proxydhcp.pid\n");
        return 1;
    }

    // load configuration at first start
    loadcfg();

    // register SIGUSR1 to trigger a configuration reload from outside
    struct sigaction sigact;
    sigact.sa_handler = &reload;
    sigaction(SIGUSR1, &sigact, NULL);

    int serverfd, port;
    int getpktinfo = 1;
    struct addrinfo hint, *res;
    char cmsg[CMSG_SPACE(sizeof(struct in_pktinfo))];
    char clientpacket[1024];
    struct sockaddr_in clientaddr;
    struct msghdr msg;
    struct cmsghdr *cmsgptr;
    struct iovec iov[1];
    unsigned int myip, clientip;
    char *txtptr;
    iov[0].iov_base = clientpacket;
    iov[0].iov_len = 1024;
    memset(&msg, 0, sizeof(msg));
    memset(&clientaddr, 0, sizeof(clientaddr));
    msg.msg_name = &clientaddr;
    msg.msg_namelen = sizeof(clientaddr);
    msg.msg_iov = iov;
    msg.msg_iovlen = 1;
    msg.msg_control = &cmsg;
    msg.msg_controllen = sizeof(cmsg);
    char defaultwinpe[20] = "Boot/bootmgfw.efi";
    char bootpmagic[4] = {0x63, 0x82, 0x53, 0x63};
    int pktsize;
    int doexit = 0;
    port = 4011;
    memset(&hint, 0, sizeof(hint));
    hint.ai_family = PF_INET; /* Would've done UNSPEC, but it doesn't work right and this is heavily v4 specific anyway */
    hint.ai_socktype = SOCK_DGRAM;
    hint.ai_flags = AI_PASSIVE;
    getaddrinfo(NULL, "4011", &hint, &res);
    serverfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
    if (!serverfd) { fprintf(stderr, "That's odd...\n"); }
    // ask the kernel to hand us the destination address of each datagram
    setsockopt(serverfd, IPPROTO_IP, IP_PKTINFO, &getpktinfo, sizeof(getpktinfo));
    if (bind(serverfd, res->ai_addr, res->ai_addrlen) < 0) {
        fprintf(stderr, "Unable to bind 4011");
        exit(1);
    }

    while (!doexit) {
        // wait (with timeout) for a port-4011 request so that pending
        // reload requests are still serviced on an idle server
        fd_set fds;
        FD_ZERO(&fds);
        FD_SET(serverfd, &fds);
        struct timeval timeout;
        timeout.tv_sec = 30;
        timeout.tv_usec = 0;
        int rc;
        if ((rc = select(serverfd + 1, &fds, 0, 0, &timeout)) <= 0) {
            if (doreload) {
                loadcfg();
                fprintf(stderr, "load in select\n");
                // log only when a reload actually happened (original logged
                // this on every timeout)
                if (verbose) { syslog(LOG_DEBUG, "reload /var/lib/xcat/proxydhcp.cfg\n"); }
            }
            continue;
        }
        if (doreload) {
            loadcfg();
            if (verbose) { syslog(LOG_DEBUG, "reload /var/lib/xcat/proxydhcp.cfg\n"); }
        }
        pktsize = recvmsg(serverfd, &msg, 0);
        if (pktsize < 320) {
            continue;    // too short to be a BOOTP/DHCP request
        }
        // must be a BOOTREQUEST carrying the DHCP magic cookie
        if (clientpacket[0] != 1 || memcmp(clientpacket + 0xec, bootpmagic, 4)) {
            continue;
        }
        // find the local address the request arrived on (used as server id)
        for (cmsgptr = CMSG_FIRSTHDR(&msg); cmsgptr != NULL; cmsgptr = CMSG_NXTHDR(&msg, cmsgptr)) {
            if (cmsgptr->cmsg_level == IPPROTO_IP && cmsgptr->cmsg_type == IP_PKTINFO) {
                myip = ((struct in_pktinfo *)(CMSG_DATA(cmsgptr)))->ipi_addr.s_addr;
            }
        }

        // extract the client IP (ciaddr, bytes 12..15 of the BOOTP header)
        clientip = 0;
        int oct;
        for (oct = 0; oct < 4; oct++) {
            clientip = clientip << 8;
            clientip += (unsigned char)clientpacket[15 - oct];
        }

        // resolve the client to a short hostname and look up its winpe path
        struct hostent *host = gethostbyaddr(&clientip, sizeof(clientip), AF_INET);
        char *winpepath = defaultwinpe;
        if (host && host->h_name) {
            // strip the domain part from the hostname
            char *place = strstr(host->h_name, ".");
            if (place) {
                *place = '\0';
            }
            winpepath = getwinpepath(host->h_name);
            if (winpepath == NULL) {
                winpepath = defaultwinpe;
            }
            if (verbose) {
                snprintf(logmsg, sizeof(logmsg),
                         "Received proxydhcp request from %s\n", host->h_name);
                syslog(LOG_DEBUG, "%s", logmsg);
            }
        }

        // find DHCP option 60 (vendor class identifier, "PXEClient:Arch:NNNNN:...")
        char *arch = NULL;
        unsigned char *p = (unsigned char *)clientpacket + 0xf0;
        while (*p != 0xff && p < (unsigned char *)clientpacket + pktsize) {
            if (*p == 60) {
                // value starts at p+2; "PXEClient:Arch:" is 15 chars -> p+0x11
                arch = (char *)(p + 0x11);
                break;
            } else {
                p += *(p + 1) + 2;    // skip option: code byte + len byte + len
            }
        }
        if (arch == NULL) {
            // no vendor class option: cannot tell BIOS from UEFI; ignore the
            // packet (the original dereferenced NULL here)
            continue;
        }

        // pick the bootloader matching the client architecture
        char winboot[256];
        memset(winboot, 0, sizeof(winboot));
        if (0 == memcmp(arch, "00000", 5)) {          // BIOS boot mode
            snprintf(winboot, sizeof(winboot), "%sBoot/pxeboot.0", winpepath);
        } else if (0 == memcmp(arch, "00007", 5)) {   // UEFI boot mode
            snprintf(winboot, sizeof(winboot), "%sBoot/bootmgfw.efi", winpepath);
        }

        // build the reply in place
        clientpacket[0] = 2;    // change to a reply
        myip = htonl(myip);     // endian neutral change
        clientpacket[0x14] = (myip >> 24) & 0xff;    // siaddr = our address
        clientpacket[0x15] = (myip >> 16) & 0xff;
        clientpacket[0x16] = (myip >> 8) & 0xff;
        clientpacket[0x17] = (myip) & 0xff;
        txtptr = clientpacket + 0x6c;    // "file" field of the BOOTP header
        strncpy(txtptr, winboot, 128);
        clientpacket[0xf0] = 0x35;    // DHCP message type option
        clientpacket[0xf1] = 0x1;     // length 1
        clientpacket[0xf2] = 0x5;     // DHCP ACK
        clientpacket[0xf3] = 0x36;    // DHCP server identifier option
        clientpacket[0xf4] = 0x4;     // length 4
        clientpacket[0xf5] = (myip >> 24) & 0xff;
        clientpacket[0xf6] = (myip >> 16) & 0xff;
        clientpacket[0xf7] = (myip >> 8) & 0xff;
        clientpacket[0xf8] = (myip) & 0xff;
        char winBCD[256];
        snprintf(winBCD, sizeof(winBCD), "%sBoot/BCD", winpepath);
        // DHCP option 252 'proxy', co-opted by bootmgfw as the BCD path
        clientpacket[0xf9] = 0xfc;
        clientpacket[0xfa] = strlen(winBCD) + 1;
        txtptr = clientpacket + 0xfb;
        strncpy(txtptr, winBCD, strlen(winBCD));
        clientpacket[0xfa + strlen(winBCD) + 1] = 0;
        clientpacket[0xfa + strlen(winBCD) + 2] = 0xff;    // end-of-options
        sendto(serverfd, clientpacket, pktsize, 0, (struct sockaddr *)&clientaddr, sizeof(clientaddr));
        if (verbose) {
            snprintf(logmsg, sizeof(logmsg),
                     "Path of bootloader:%s. Path of BCD:%s\n", winboot, winBCD);
            syslog(LOG_DEBUG, "%s", logmsg);
        }
    }
    if (verbose) { closelog(); }
    return 0;
}

View File

@@ -14,7 +14,7 @@
# postscript (stateful install) or with the otherpkgs processing of
# genimage (stateless/statelite install). This script will install any
# gpfs update rpms that exist on the xCAT management node in the
# /install/post/gpfs_updates directory.
# /install/post/otherpkgs/gpfs_updates directory.
# This is necessary because the GPFS updates can ONLY be installed
# after the base rpms have been installed, and the update rpms cannot
# exist in any rpm repositories used by xCAT otherpkgs processing

View File

@@ -91,7 +91,9 @@ template "/etc/quantum/policy.json" do
group node["openstack"]["network"]["platform"]["group"]
mode 00644
notifies :restart, "service[quantum-server]", :delayed
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-network::server")
notifies :restart, "service[quantum-server]", :delayed
end
end
rabbit_server_role = node["openstack"]["network"]["rabbit_server_chef_role"]
@@ -143,12 +145,14 @@ end
# may just be running a subset of agents (like l3_agent)
# and not the api server components, so we ignore restart
# failures here as there may be no quantum-server process
service "quantum-server" do
service_name platform_options["quantum_server_service"]
supports :status => true, :restart => true
ignore_failure true
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-network::server")
service "quantum-server" do
service_name platform_options["quantum_server_service"]
supports :status => true, :restart => true
ignore_failure true
action :nothing
action :nothing
end
end
template "/etc/quantum/quantum.conf" do
@@ -166,7 +170,9 @@ template "/etc/quantum/quantum.conf" do
:service_pass => service_pass
)
notifies :restart, "service[quantum-server]", :delayed
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-network::server")
notifies :restart, "service[quantum-server]", :delayed
end
end
template "/etc/quantum/api-paste.ini" do
@@ -179,7 +185,9 @@ template "/etc/quantum/api-paste.ini" do
"service_pass" => service_pass
)
notifies :restart, "service[quantum-server]", :delayed
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-network::server")
notifies :restart, "service[quantum-server]", :delayed
end
end
directory "/etc/quantum/plugins/#{main_plugin}" do
@@ -336,7 +344,9 @@ when "openvswitch"
:sql_connection => sql_connection,
:local_ip => local_ip
)
notifies :restart, "service[quantum-server]", :delayed
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-network::server")
notifies :restart, "service[quantum-server]", :delayed
end
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-network::openvswitch")
notifies :restart, "service[quantum-plugin-openvswitch-agent]", :delayed
end

View File

@@ -9,31 +9,31 @@
"ref": "f759cd013c0a836f2acb219b3e006ff0a1308878"
},
"memcached": {
"locked_version": "1.4.0"
"locked_version": "1.6.2"
},
"runit": {
"locked_version": "1.1.6"
"locked_version": "1.3.0"
},
"build-essential": {
"locked_version": "1.4.0"
"locked_version": "1.4.2"
},
"yum": {
"locked_version": "2.3.0"
"locked_version": "2.4.0"
},
"sysctl": {
"locked_version": "0.3.3"
},
"apt": {
"locked_version": "2.1.0"
"locked_version": "2.3.0"
},
"git": {
"locked_version": "2.5.2"
"locked_version": "2.7.0"
},
"dmg": {
"locked_version": "1.1.0"
"locked_version": "2.0.4"
},
"windows": {
"locked_version": "1.10.0"
"locked_version": "1.11.0"
},
"chef_handler": {
"locked_version": "1.1.4"

View File

@@ -0,0 +1,46 @@
# CHANGELOG for cookbook-openstack-object-storage
This file is used to list changes made in each version of cookbook-openstack-object-storage.
## 7.1.0:
* Update apt sources to grizzly to prepare for grizzly
and havana branches
## 7.0.11:
* Add missing swift-container-sync upstart service which is
not setup by default in ubuntu 12.04 packages
## 7.0.10:
* Do not role restrict super_admin_key in proxy config
* Case correct swauth_version attribute in proxy recipe
* Treat platform_options["swauth_packages"] as a list
## 7.0.9:
* Bugfix tempurl role restriction
## 7.0.8:
* Bugfix allow_override spacing in proxy server template
## 7.0.7:
* Add flexibility to middleware pipeline
## 7.0.6:
* Add choice of install python-swauth from git or package
## 7.0.5:
* Add support for container-sync
## 7.0.4:
* Allow roles used in searches to be defined by cookbook user
## 7.0.3:
* Bugfix the swift-ring-builder output scanner
## 7.0.2:
* Expand statsd support as well as capacity and recon supporting.
## 7.0.1:
* Support more than 24 disks (/dev/sdaa, /dev/vdab, etc)
## 7.0.0:
* Initial openstack object storage cookbook

View File

@@ -63,6 +63,14 @@ Attributes
* ```default[:swift][:authmode]``` - "swauth" or "keystone" (default "swauth"). Right now, only swauth is supported (defaults to swauth)
* ```default[:swift][:tempurl]``` - "true" or "false". Adds tempurl to the pipeline and sets allow_overrides to true when using swauth
* ```default[:swift][:swauth_source]``` - "git" or "package"(default). Selects between installing python-swauth from git or system package
* ```default[:swift][:swauth_repository]``` - Specifies git repo. Default "https://github.com/gholt/swauth.git"
* ```default[:swift][:swauth_version]``` - Specifies git repo tagged branch. Default "1.0.8"
* ```default[:swift][:swift_secret_databag_name]``` - this cookbook supports an optional secret databag where we will retrieve the following attributes overriding any default attributes below. (defaults to nil)
```
@@ -249,7 +257,7 @@ License and Author
| | |
|:---------------------|:---------------------------------------------------|
| **Authors** | Alan Meadows (<alan.meadows@gmail.com>) |
| | Oisin Feely (<of3434@att.com>) |
| | Oisin Feeley (<of3434@att.com>) |
| | Ron Pedde (<ron.pedde@rackspace.com>) |
| | Will Kelly (<will.kelly@rackspace.com>) |
| | |

View File

@@ -11,7 +11,7 @@ default["swift"]["git_builder_ip"] = "127.0.0.1"
# the release only has any effect on ubuntu, and must be
# a valid release on http://ubuntu-cloud.archive.canonical.com/ubuntu
default["swift"]["release"] = "folsom"
default["swift"]["release"] = "grizzly"
# we support an optional secret databag where we will retrieve the
# following attributes overriding any default attributes here
@@ -25,6 +25,17 @@ default["swift"]["release"] = "folsom"
# }
default["swift"]["swift_secret_databag_name"] = nil
#--------------------
# roles
#--------------------
default["swift"]["setup_chef_role"] = "swift-setup"
default["swift"]["management_server_chef_role"] = "swift-management-server"
default["swift"]["proxy_server_chef_role"] = "swift-proxy-server"
default["swift"]["object_server_chef_role"] = "swift-object-server"
default["swift"]["account_server_chef_role"] = "swift-account-server"
default["swift"]["container_server_chef_role"] = "swift-container-server"
#--------------------
# authentication
#--------------------
@@ -53,7 +64,40 @@ default["swift"]["ring"]["replicas"] = 3
#------------------
# statistics
#------------------
default["swift"]["enable_statistics"] = true
default["swift"]["statistics"]["enabled"] = true
default["swift"]["statistics"]["sample_rate"] = 1
# there are two ways to discover your graphite server ip for
# statsd to periodically publish to. You can directly set
# the ip below, or leave it set to nil and supply chef with
# the role name of your graphite server and the interface
# name to retrieve the appropriate internal ip address from
#
# if no servers with the role below can be found then
# 127.0.0.1 will be used
default["swift"]["statistics"]["graphing_ip"] = nil
default["swift"]["statistics"]["graphing_role"] = 'graphite-role'
default["swift"]["statistics"]["graphing_interface"] = 'eth0'
# how frequently to run chef instantiated /usr/local/bin/swift_statsd_publish.py
# which publishes dispersion and recon statistics (in minutes)
default["swift"]["statistics"]["report_frequency"] = 15
# enable or disable specific portions of generated report
default["swift"]["statistics"]["enable_dispersion_report"] = true
default["swift"]["statistics"]["enable_recon_report"] = true
default["swift"]["statistics"]["enable_disk_report"] = true
# settings for statsd which should be configured to use the local
# statsd daemon that chef will install if statistics are enabled
default["swift"]["statistics"]["statsd_host"] = "127.0.0.1"
default["swift"]["statistics"]["statsd_port"] = "8125"
default["swift"]["statistics"]["statsd_prefix"] = "openstack.swift"
# paths to the recon cache files
default["swift"]["statistics"]["recon_account_cache"] = "/var/cache/swift/account.recon"
default["swift"]["statistics"]["recon_container_cache"] = "/var/cache/swift/container.recon"
default["swift"]["statistics"]["recon_object_cache"] = "/var/cache/swift/object.recon"
#------------------
# network settings
@@ -109,11 +153,52 @@ default["swift"]["disk_test_filter"] = [ "candidate =~ /(sd|hd|xvd|vd)(?!a$)[a-z
"not system('/sbin/parted /dev/' + candidate + ' -s print | grep linux-swap')",
"not info.has_key?('removable') or info['removable'] == 0.to_s" ]
#-------------------
# template overrides
#-------------------
# proxy-server
# override in a wrapper to enable tempurl with swauth
default["swift"]["tempurl"]["enabled"] = false
# container-server
# Override this with an allowed list of your various swift clusters if you wish
# to enable container sync for your end-users between clusters. This should
# be an array of fqdn hostnames for the cluster end-points that your end-users
# would access in the format of ['host1', 'host2', 'host3']
default["swift"]["container-server"]["allowed_sync_hosts"] = []
# container-sync logging settings
default["swift"]["container-server"]["container-sync"]["log_name"] = 'container-sync'
default["swift"]["container-server"]["container-sync"]["log_facility"] = 'LOG_LOCAL0'
default["swift"]["container-server"]["container-sync"]["log_level"] = 'INFO'
# If you need to use an HTTP Proxy, set it here; defaults to no proxy.
default["swift"]["container-server"]["container-sync"]["sync_proxy"] = nil
# Will sync, at most, each container once per interval (in seconds)
default["swift"]["container-server"]["container-sync"]["interval"] = 300
# Maximum amount of time to spend syncing each container per pass (in seconds)
default["swift"]["container-server"]["container-sync"]["container_time"] = 60
#------------------
# swauth source
# -----------------
# Versions of swauth in Ubuntu Cloud Archive PPA can be outdated. This
# allows us to chose to install directly from a tagged branch of
# gholt's repository.
# values: package, git
default["swift"]["swauth_source"] = "package"
default["swift"]["swauth_repository"] = "https://github.com/gholt/swauth.git"
default["swift"]["swauth_version"] = "1.0.8"
#------------------
# packages
#------------------
# Leveling between distros
case platform
when "redhat"
@@ -132,7 +217,8 @@ when "redhat"
"git_dir" => "/var/lib/git",
"git_service" => "git",
"service_provider" => Chef::Provider::Service::Redhat,
"override_options" => ""
"override_options" => "",
"swift_statsd_publish" => "/usr/bin/swift-statsd-publish.py"
}
#
# python-iso8601 is a missing dependency for swift.
@@ -153,7 +239,8 @@ when "centos"
"git_dir" => "/var/lib/git",
"git_service" => "git",
"service_provider" => Chef::Provider::Service::Redhat,
"override_options" => ""
"override_options" => "",
"swift_statsd_publish" => "/usr/bin/swift-statsd-publish.py"
}
when "fedora"
default["swift"]["platform"] = {
@@ -171,7 +258,8 @@ when "fedora"
"git_dir" => "/var/lib/git",
"git_service" => "git",
"service_provider" => Chef::Provider::Service::Systemd,
"override_options" => ""
"override_options" => "",
"swift_statsd_publish" => "/usr/bin/swift-statsd-publish.py"
}
when "ubuntu"
default["swift"]["platform"] = {
@@ -189,6 +277,7 @@ when "ubuntu"
"git_dir" => "/var/cache/git",
"git_service" => "git-daemon",
"service_provider" => Chef::Provider::Service::Upstart,
"override_options" => "-o Dpkg::Options:='--force-confold' -o Dpkg::Option:='--force-confdef'"
"override_options" => "-o Dpkg::Options:='--force-confold' -o Dpkg::Option:='--force-confdef'",
"swift_statsd_publish" => "/usr/local/bin/swift-statsd-publish.py"
}
end

View File

@@ -0,0 +1,19 @@
# swift-container-sync - SWIFT Container Sync
#
# The swift container sync.
description "SWIFT Container Sync"
author "Sergio Rubio <rubiojr@bvox.net>"
# Run in the normal multi-user runlevels; stop on halt/single-user/reboot.
start on runlevel [2345]
stop on runlevel [016]
# Only launch the daemon when a container-server configuration exists;
# otherwise fail the job (exit 1) so upstart does not keep respawning
# a service that cannot possibly start.
pre-start script
if [ -f "/etc/swift/container-server.conf" ]; then
exec /usr/bin/swift-init container-sync start
else
exit 1
fi
end script
# Make sure swift-init tears the sync daemon down when the job stops.
post-stop exec /usr/bin/swift-init container-sync stop

View File

@@ -3,7 +3,7 @@ maintainer "ATT, Inc."
license "Apache 2.0"
description "Installs and configures Openstack Swift"
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
version "1.1.0"
version "7.1.0"
recipe "openstack-object-storage::setup", "Does initial setup of a swift cluster"
recipe "openstack-object-storage::account-server", "Installs the swift account server"
recipe "openstack-object-storage::object-server", "Installs the swift object server"

View File

@@ -62,7 +62,8 @@ def generate_script
# figure out what's present in the cluster
disk_data[which] = {}
disk_state,_,_ = Chef::Search::Query.new.search(:node,"chef_environment:#{node.chef_environment} AND roles:swift-#{which}-server")
role = node["swift"]["#{which}_server_chef_role"]
disk_state,_,_ = Chef::Search::Query.new.search(:node,"chef_environment:#{node.chef_environment} AND roles:#{role}")
# for a running track of available disks
disk_data[:available] ||= {}
@@ -195,24 +196,24 @@ def parse_ring_output(ring_data)
next
elsif line =~ /^\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+\.\d+\.\d+\.\d+)\s+(\d+)\s+(\S+)\s+([0-9.]+)\s+(\d+)\s+([-0-9.]+)\s*$/
output[:hosts] ||= {}
output[:hosts][$3] ||= {}
output[:hosts][$4] ||= {}
output[:hosts][$3][$5] = {}
output[:hosts][$4][$6] ||= {}
output[:hosts][$3][$5][:id] = $1
output[:hosts][$3][$5][:region] = $2
output[:hosts][$3][$5][:zone] = $3
output[:hosts][$3][$5][:ip] = $4
output[:hosts][$3][$5][:port] = $5
output[:hosts][$3][$5][:device] = $6
output[:hosts][$3][$5][:weight] = $7
output[:hosts][$3][$5][:partitions] = $8
output[:hosts][$3][$5][:balance] = $9
output[:hosts][$4][$6][:id] = $1
output[:hosts][$4][$6][:region] = $2
output[:hosts][$4][$6][:zone] = $3
output[:hosts][$4][$6][:ip] = $4
output[:hosts][$4][$6][:port] = $5
output[:hosts][$4][$6][:device] = $6
output[:hosts][$4][$6][:weight] = $7
output[:hosts][$4][$6][:partitions] = $8
output[:hosts][$4][$6][:balance] = $9
elsif line =~ /^\s+(\d+)\s+(\d+)\s+(\d+\.\d+\.\d+\.\d+)\s+(\d+)\s+(\S+)\s+([0-9.]+)\s+(\d+)\s+([-0-9.]+)\s*$/
output[:hosts] ||= {}
output[:hosts][$3] ||= {}
output[:hosts][$3][$5] = {}
output[:hosts][$3][$5] ||= {}
output[:hosts][$3][$5][:id] = $1
output[:hosts][$3][$5][:zone] = $2

View File

@@ -23,11 +23,39 @@ end
include_recipe 'sysctl::default'
#-------------
# stats
#-------------
# optionally statsd daemon for stats collection
if node["swift"]["enable_statistics"]
if node["swift"]["statistics"]["enabled"]
node.set['statsd']['relay_server'] = true
include_recipe 'statsd::server'
end
# find graphing server address
if Chef::Config[:solo] and not node['recipes'].include?("chef-solo-search")
Chef::Log.warn("This recipe uses search. Chef Solo does not support search.")
graphite_servers = []
else
graphite_servers = search(:node, "roles:#{node['swift']['statistics']['graphing_role']} AND chef_environment:#{node.chef_environment}")
end
graphite_host = "127.0.0.1"
unless graphite_servers.empty?
graphite_host = graphite_servers[0]['network']["ipaddress_#{node['swift']['statistics']['graphing_interface']}"]
end
if node['swift']['statistics']['graphing_ip'].nil?
node.set['statsd']['graphite_host'] = graphite_host
else
node.set['statsd']['graphite_host'] = node['swift']['statistics']['graphing_ip']
end
#--------------
# swift common
#--------------
platform_options = node["swift"]["platform"]
# update repository if requested with the ubuntu cloud

View File

@@ -91,3 +91,31 @@ template "/etc/swift/container-server.conf" do
notifies :restart, "service[swift-container-updater]", :immediately
notifies :restart, "service[swift-container-auditor]", :immediately
end
# Ubuntu 12.04 packages are missing the swift-container-sync service scripts
# See https://bugs.launchpad.net/cloud-archive/+bug/1250171
if platform?("ubuntu")
cookbook_file "/etc/init/swift-container-sync.conf" do
owner "root"
group "root"
mode "0755"
source "swift-container-sync.conf.upstart"
action :create
not_if "[ -e /etc/init/swift-container-sync.conf ]"
end
link "/etc/init.d/swift-container-sync" do
to "/lib/init/upstart-job"
not_if "[ -e /etc/init.d/swift-container-sync ]"
end
end
service_name=platform_options["service_prefix"] + 'swift-container-sync' + platform_options["service_suffix"]
unless node["swift"]["container-server"]["allowed_sync_hosts"] == []
service "swift-container-sync" do
service_name service_name
provider platform_options["service_provider"]
supports :status => false, :restart => true
action [:enable, :start]
only_if "[ -e /etc/swift/container-server.conf ] && [ -e /etc/swift/container.ring.gz ]"
end
end

View File

@@ -26,10 +26,29 @@ include_recipe "openstack-object-storage::ring-repo"
platform_options = node["swift"]["platform"]
if node["swift"]["authmode"] == "swauth"
platform_options["swauth_packages"].each.each do |pkg|
package pkg do
action :install
options platform_options["override_options"] # retain configs
case node["swift"]["swauth_source"]
when "package"
platform_options["swauth_packages"].each do |pkg|
package pkg do
action :install
options platform_options["override_options"]
end
end
when "git"
git "#{Chef::Config[:file_cache_path]}/swauth" do
repository node["swift"]["swauth_repository"]
revision node["swift"]["swauth_version"]
action :sync
end
bash "install_swauth" do
cwd "#{Chef::Config[:file_cache_path]}/swauth"
user "root"
group "root"
code <<-EOH
python setup.py install
EOH
environment 'PREFIX' => "/usr/local"
end
end
end
@@ -44,6 +63,19 @@ else
auth_key = swift_secrets['dispersion_auth_key']
end
if node['swift']['statistics']['enabled']
template platform_options["swift_statsd_publish"] do
source "swift-statsd-publish.py.erb"
owner "root"
group "root"
mode "0755"
end
cron "cron_swift_statsd_publish" do
command "#{platform_options['swift_statsd_publish']} > /dev/null 2>&1"
minute "*/#{node["swift"]["statistics"]["report_frequency"]}"
end
end
template "/etc/swift/dispersion.conf" do
source "dispersion.conf.erb"
owner "swift"

View File

@@ -94,6 +94,17 @@ template "/etc/swift/object-server.conf" do
notifies :restart, "service[swift-object-auditor]", :immediately
end
%w[ /var/swift /var/swift/recon ].each do |path|
directory path do
# Create the swift recon cache directory and set its permissions.
owner "swift"
group "swift"
mode 00755
action :create
end
end
cron "swift-recon" do
minute "*/5"
command "swift-recon-cron /etc/swift/object-server.conf"

View File

@@ -26,7 +26,8 @@ end
if node.run_list.expand(node.chef_environment).recipes.include?("openstack-object-storage::setup")
Chef::Log.info("I ran the openstack-object-storage::setup so I will use my own swift passwords")
else
setup = search(:node, "chef_environment:#{node.chef_environment} AND roles:swift-setup")
setup_role = node["swift"]["setup_chef_role"]
setup = search(:node, "chef_environment:#{node.chef_environment} AND roles:#{setup_role}")
if setup.length == 0
Chef::Application.fatal! "You must have run the openstack-object-storage::setup recipe (on this or another node) before running the swift::proxy recipe on this node"
elsif setup.length == 1
@@ -47,11 +48,35 @@ platform_options["proxy_packages"].each do |pkg|
end
end
package "python-swauth" do
action :install
only_if { node["swift"]["authmode"] == "swauth" }
if node["swift"]["authmode"] == "swauth"
case node["swift"]["swauth_source"]
when "package"
platform_options["swauth_packages"].each do |pkg|
package pkg do
action :install
options platform_options["override_options"]
end
end
when "git"
git "#{Chef::Config[:file_cache_path]}/swauth" do
repository node["swift"]["swauth_repository"]
revision node["swift"]["swauth_version"]
action :sync
end
bash "install_swauth" do
cwd "#{Chef::Config[:file_cache_path]}/swauth"
user "root"
group "root"
code <<-EOH
python setup.py install
EOH
environment 'PREFIX' => "/usr/local"
end
end
end
package "python-swift-informant" do
action :install
only_if { node["swift"]["use_informant"] }
@@ -84,7 +109,8 @@ if Chef::Config[:solo]
memcache_servers = [ "127.0.0.1:11211" ]
else
memcache_servers = []
proxy_nodes = search(:node, "chef_environment:#{node.chef_environment} AND roles:swift-proxy-server")
proxy_role = node["swift"]["proxy_server_chef_role"]
proxy_nodes = search(:node, "chef_environment:#{node.chef_environment} AND roles:#{proxy_role}")
proxy_nodes.each do |proxy|
proxy_ip = locate_ip_in_cidr(node["swift"]["network"]["proxy-cidr"], proxy)
next if not proxy_ip # skip nil ips so we dont break the config
@@ -101,6 +127,19 @@ else
authkey = swift_secrets['swift_authkey']
end
if node["swift"]["authmode"] == "keystone"
openstack_identity_bootstrap_token = secret "secrets", "openstack_identity_bootstrap_token"
%w[ /home/swift /home/swift/keystone-signing ].each do |path|
directory path do
owner "swift"
group "swift"
mode 00700
action :create
end
end
end
# create proxy config file
template "/etc/swift/proxy-server.conf" do
source "proxy-server.conf.erb"
@@ -108,6 +147,7 @@ template "/etc/swift/proxy-server.conf" do
group "swift"
mode "0600"
variables("authmode" => node["swift"]["authmode"],
"openstack_identity_bootstrap_token" => openstack_identity_bootstrap_token,
"bind_host" => node["swift"]["network"]["proxy-bind-ip"],
"bind_port" => node["swift"]["network"]["proxy-bind-port"],
"authkey" => authkey,

View File

@@ -22,7 +22,8 @@ include_recipe "openstack-object-storage::common"
if Chef::Config[:solo]
Chef::Application.fatal! "This recipe uses search. Chef Solo does not support search."
else
setup_role_count = search(:node, "chef_environment:#{node.chef_environment} AND roles:swift-setup").length
setup_role = node["swift"]["setup_chef_role"]
setup_role_count = search(:node, "chef_environment:#{node.chef_environment} AND roles:#{setup_role}").length
if setup_role_count > 1
Chef::Application.fatal! "You can only have one node with the swift-setup role"
end
@@ -42,9 +43,32 @@ platform_options["proxy_packages"].each do |pkg|
end
end
package "python-swauth" do
action :upgrade
only_if { node["swift"]["authmode"] == "swauth" }
if node["swift"]["authmode"] == "swauth"
case node["swift"]["swauth_source"]
when "package"
platform_options["swauth_packages"].each do |pkg|
package pkg do
action :upgrade
options platform_options["override_options"]
end
end
when "git"
git "#{Chef::Config[:file_cache_path]}/swauth" do
repository node["swift"]["swauth_repository"]
revision node["swift"]["swauth_version"]
action :sync
end
bash "install_swauth" do
cwd "#{Chef::Config[:file_cache_path]}/swauth"
user "root"
group "root"
code <<-EOH
python setup.py install
EOH
environment 'PREFIX' => "/usr/local"
end
end
end
package "python-swift-informant" do

View File

@@ -14,12 +14,12 @@ describe 'openstack-object-storage::common' do
@node = @chef_run.node
@node.set['platform_family'] = "debian"
@node.set['lsb']['codename'] = "precise"
@node.set['swift']['release'] = "folsom"
@node.set['swift']['release'] = "grizzly"
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['git_builder_ip'] = '10.0.0.10'
# TODO: this does not work
# ::Chef::Log.should_receive(:info).with("chefspec: precise-updates/folsom")
# ::Chef::Log.should_receive(:info).with("chefspec: precise-updates/grizzly")
@chef_run.converge "openstack-object-storage::common"
end

View File

@@ -16,6 +16,8 @@ describe 'openstack-object-storage::container-server' do
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['network']['container-bind-ip'] = '10.0.0.1'
@node.set['swift']['network']['container-bind-port'] = '8080'
@node.set['swift']['container-server']['allowed_sync_hosts'] = ['host1', 'host2', 'host3']
@node.set['swift']['container-bind-port'] = '8080'
@node.set['swift']['disk_enum_expr'] = "[{ 'sda' => {}}]"
@node.set['swift']['disk_test_filter'] = [ "candidate =~ /sd[^a]/ or candidate =~ /hd[^a]/ or candidate =~ /vd[^a]/ or candidate =~ /xvd[^a]/",
"File.exist?('/dev/' + candidate)",
@@ -33,7 +35,7 @@ describe 'openstack-object-storage::container-server' do
end
it "starts swift container services on boot" do
%w{swift-container swift-container-auditor swift-container-replicator swift-container-updater}.each do |svc|
%w{swift-container swift-container-auditor swift-container-replicator swift-container-updater swift-container-sync}.each do |svc|
expect(@chef_run).to set_service_to_start_on_boot svc
end
end
@@ -52,12 +54,34 @@ describe 'openstack-object-storage::container-server' do
expect(sprintf("%o", @file.mode)).to eq "600"
end
it "template contents" do
pending "TODO: implement"
it "has allowed sync hosts" do
expect(@chef_run).to create_file_with_content @file.name,
"allowed_sync_hosts = host1,host2,host3"
end
end
end
it "should create container sync upstart conf for ubuntu" do
expect(@chef_run).to create_cookbook_file "/etc/init/swift-container-sync.conf"
end
it "should create container sync init script for ubuntu" do
expect(@chef_run).to create_link "/etc/init.d/swift-container-sync"
end
describe "/etc/swift/container-server.conf" do
before do
@node = @chef_run.node
@node.set["swift"]["container-server"]["allowed_sync_hosts"] = []
@chef_run.converge "openstack-object-storage::container-server"
@file = @chef_run.template "/etc/swift/container-server.conf"
end
it "has no allowed_sync_hosts on empty lists" do
expect(@chef_run).not_to create_file_with_content @file.name,
/^allowed_sync_hots =/
end
end
end
end

View File

@@ -14,7 +14,7 @@ describe 'openstack-object-storage::disks' do
@node = @chef_run.node
@node.set['platform_family'] = "debian"
@node.set['lsb']['codename'] = "precise"
@node.set['swift']['release'] = "folsom"
@node.set['swift']['release'] = "grizzly"
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['git_builder_ip'] = '10.0.0.10'
@node.set['swift']['disk_enum_expr'] = "[{ 'sda' => {}}]"

View File

@@ -14,6 +14,9 @@ describe 'openstack-object-storage::management-server' do
@node = @chef_run.node
@node.set['lsb']['code'] = 'precise'
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['statistics']['enabled'] = true
@node.set['swift']['swauth_source'] = 'package'
@node.set['swift']['platform']['swauth_packages'] = ['swauth']
@chef_run.converge "openstack-object-storage::management-server"
end
@@ -42,6 +45,27 @@ describe 'openstack-object-storage::management-server' do
end
describe "/usr/local/bin/swift-statsd-publish.py" do
before do
@file = @chef_run.template "/usr/local/bin/swift-statsd-publish.py"
end
it "has proper owner" do
expect(@file).to be_owned_by "root", "root"
end
it "has proper modes" do
expect(sprintf("%o", @file.mode)).to eq "755"
end
it "has expected statsd host" do
expect(@chef_run).to create_file_with_content @file.name,
"self.statsd_host = '127.0.0.1'"
end
end
end
end

View File

@@ -14,6 +14,8 @@ describe 'openstack-object-storage::proxy-server' do
@node = @chef_run.node
@node.set['lsb']['code'] = 'precise'
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['platform']['swauth_packages'] = ['swauth']
@node.set['swift']['swauth_source'] = 'package'
@node.set['swift']['network']['proxy-bind-ip'] = '10.0.0.1'
@node.set['swift']['network']['proxy-bind-port'] = '8080'
@chef_run.converge "openstack-object-storage::proxy-server"
@@ -28,7 +30,7 @@ describe 'openstack-object-storage::proxy-server' do
end
it "installs swauth package if swauth is selected" do
expect(@chef_run).to install_package "python-swauth"
expect(@chef_run).to install_package "swauth"
end
it "starts swift-proxy on boot" do

View File

@@ -14,7 +14,7 @@ describe 'openstack-object-storage::ring-repo' do
@node = @chef_run.node
@node.set['platform_family'] = "debian"
@node.set['lsb']['codename'] = "precise"
@node.set['swift']['release'] = "folsom"
@node.set['swift']['release'] = "grizzly"
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['git_builder_ip'] = '10.0.0.10'
@chef_run.converge "openstack-object-storage::ring-repo"

View File

@@ -14,7 +14,7 @@ describe 'openstack-object-storage::rsync' do
@node = @chef_run.node
@node.set['platform_family'] = "debian"
@node.set['lsb']['codename'] = "precise"
@node.set['swift']['release'] = "folsom"
@node.set['swift']['release'] = "grizzly"
@node.set['swift']['authmode'] = 'swauth'
@node.set['swift']['git_builder_ip'] = '10.0.0.10'
@chef_run.converge "openstack-object-storage::rsync"

View File

@@ -15,11 +15,11 @@
bind_ip = <%= @bind_ip %>
bind_port = <%= @bind_port %>
workers = 10
<% if node[:swift][:enable_statistics] -%>
<% if node[:swift][:statistics][:enabled] -%>
log_statsd_host = localhost
log_statsd_port = 8125
log_statsd_default_sample_rate = 1
log_statsd_metric_prefix = openstack.swift.<%= node[:hostname] %>
log_statsd_default_sample_rate = <%= node[:swift][:statistics][:sample_rate] %>
log_statsd_metric_prefix = <%= node[:swift][:statistics][:statsd_prefix] %>.<%= node[:hostname] %>
<% end %>
[pipeline:main]

View File

@@ -18,12 +18,16 @@
bind_ip = <%= @bind_ip %>
bind_port = <%= @bind_port %>
workers = 10
<% if node[:swift][:enable_statistics] -%>
<% if node["swift"]["enable_statistics"] -%>
log_statsd_host = localhost
log_statsd_port = 8125
log_statsd_default_sample_rate = 1
log_statsd_metric_prefix = openstack.swift.<%= node[:hostname] %>
<% end %>
log_statsd_metric_prefix = openstack.swift.<%= node["hostname"] %>
<% end -%>
<% if node["swift"]["container-server"]["allowed_sync_hosts"] -%>
allowed_sync_hosts = <%= node["swift"]["container-server"]["allowed_sync_hosts"].join(",") %>
<% end -%>
[pipeline:main]
pipeline = container-server
@@ -77,12 +81,14 @@ use = egg:swift#container
[container-sync]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-sync
# log_facility = LOG_LOCAL0
# log_level = INFO
log_name = <%= node["swift"]["container-server"]["container-sync"]["log_name"] %>
log_facility = <%= node["swift"]["container-server"]["container-sync"]["log_facility"] %>
log_level = <%= node["swift"]["container-server"]["container-sync"]["log_level"] %>
# If you need to use an HTTP Proxy, set it here; defaults to no proxy.
# sync_proxy = http://127.0.0.1:8888
<% if node["swift"]["container-server"]["container-sync"]["sync_proxy"] -%>
sync_proxy = <%= node["swift"]["container-server"]["container-sync"]["sync_proxy"] %>
<% end -%>
# Will sync, at most, each container once per interval
# interval = 300
interval = <%= node["swift"]["container-server"]["container-sync"]["interval"] %>
# Maximum amount of time to spend syncing each container per pass
# container_time = 60
container_time = <%= node["swift"]["container-server"]["container-sync"]["container_time"] %>

View File

@@ -16,11 +16,11 @@
bind_ip = <%= @bind_ip %>
bind_port = <%= @bind_port %>
workers = 10
<% if node[:swift][:enable_statistics] -%>
<% if node[:swift][:statistics][:enabled] -%>
log_statsd_host = localhost
log_statsd_port = 8125
log_statsd_default_sample_rate = 1
log_statsd_metric_prefix = openstack.swift.<%= node[:hostname] %>
log_statsd_default_sample_rate = <%= node[:swift][:statistics][:sample_rate] %>
log_statsd_metric_prefix = <%= node[:swift][:statistics][:statsd_prefix] %>.<%= node[:hostname] %>
<% end %>
[pipeline:main]

View File

@@ -8,15 +8,23 @@ when "swauth"
end
account_management=false
if node[:roles].include?("swift-management-server") and node[:swift][:authmode] == "swauth" then
if node[:swift][:authmode] == "swauth" then
account_management="true"
end
# need to both: 1) add tempurl before auth middleware, 2) set allow_overrides=true
tempurl_toggle=false
if node[:swift][:authmode] == "swauth" and node[:swift][:tempurl][:enabled] == true then
tempurl_toggle = true
pipeline = "tempurl swauth"
end
-%>
# This file is managed by chef. Do not edit it.
#
# Cluster info:
# Auth mode: <%= node[:swift][:authmode] %>
# Management server: <%= node[:roles].include?("swift-management-server") %>
# Management server: <%= node[:roles].include?(node[:swift][:management_server_chef_role]) %>
# Account management enabled: <%= account_management %>
# Auth pipeline: <%= pipeline %>
@@ -38,11 +46,12 @@ end
workers = <%= [ node[:cpu][:total] - 1, 1 ].max %>
bind_ip = <%= @bind_host %>
bind_port = <%= @bind_port %>
<% if node[:swift][:enable_statistics] -%>
user = swift
<% if node[:swift][:statistics][:enabled] -%>
log_statsd_host = localhost
log_statsd_port = 8125
log_statsd_default_sample_rate = 1
log_statsd_metric_prefix = openstack.swift.<%= node[:hostname] %>
log_statsd_default_sample_rate = <%= node[:swift][:statistics][:sample_rate] %>
log_statsd_metric_prefix = <%= node[:swift][:statistics][:statsd_prefix] %>.<%= node[:hostname] %>
<% end %>
@@ -82,13 +91,7 @@ use = egg:swift#proxy
# If set to 'true' authorized accounts that do not yet exist within the Swift
# cluster will be automatically created.
# account_autocreate = false
######
#
# N.B. ideally allow_account_management would only be set on the
# management server, but swauth will delete using the cluster url
# and not the local url
# allow_account_managemnet = <%= account_management %>
allow_account_management = true
allow_account_management = <%= account_management %>
<% if @authmode == "keystone" -%>
account_autocreate = true
@@ -106,6 +109,12 @@ default_swift_cluster = local#<%= node[:swift][:swift_url] %>#<%= node[:swift][:
<% else %>
default_swift_cluster = local#<%= node[:swift][:swift_url] %>
<% end %>
<% if tempurl_toggle -%>
allow_overrides = true
<% end %>
<% end %>
<% if node["swift"]["container-server"]["allowed_sync_hosts"] -%>
allowed_sync_hosts = <%= node["swift"]["container-server"]["allowed_sync_hosts"].join(",") %>
<% end %>
[filter:healthcheck]
@@ -129,7 +138,10 @@ use = egg:swift#memcache
# commas, as in: 10.1.2.3:11211,10.1.2.4:11211
# memcache_servers = 127.0.0.1:11211
#####
memcache_servers = <%= @memcache_servers.join(",") %>
#memcache_servers = <%= @memcache_servers.join(",") %>
<% unless @memcache_servers.empty? -%>
memcache_servers = <%= @memcache_servers %>
<% end -%>
[filter:ratelimit]
use = egg:swift#ratelimit
@@ -238,7 +250,7 @@ use = egg:swift#tempurl
use = egg:swift#formpost
[filter:keystoneauth]
operator_roles = Member,admin
operator_roles = Member,admin,swiftoperator
use = egg:swift#keystoneauth
[filter:proxy-logging]
@@ -253,10 +265,31 @@ use = egg:swift#proxy_logging
# You can use log_statsd_* from [DEFAULT] or override them here:
# access_log_statsd_host = localhost
# access_log_statsd_port = 8125
# access_log_statsd_default_sample_rate = 1
# access_log_statsd_default_sample_rate = <%= node[:swift][:statistics][:sample_rate] %>
# access_log_statsd_metric_prefix =
# access_log_headers = False
# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY
[filter:authtoken]
<% case @authmode
when "keystone" -%>
paste.filter_factory = keystone.middleware.auth_token:filter_factory
# usage for anonymous referrers ('.r:*')
delay_auth_decision = true
#
signing_dir = /home/swift/keystone-signing
auth_protocol = http
auth_port = 35357
auth_host = <%= node["swift"]["network"]["proxy-bind-ip"] %>
admin_token = <%= @openstack_identity_bootstrap_token %>
# the service tenant and swift userid and password created in Keystone
admin_tenant_name = service
admin_user = swift
admin_password = swift
<% end -%>

View File

@@ -3,7 +3,7 @@ gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = 0.0.0.0
address = <%= @storage_local_net_ip %>
[account]
max connections = 10

View File

@@ -0,0 +1,157 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
#
# Author: Alan Meadows <alan.meadows@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
THIS FILE WAS INSTALLED BY CHEF. ANY CHANGES WILL BE OVERWRITTEN.
Openstack swift collector for recon and dispersion reports. Will send
back dispersion reporting metrics as well as swift recon statistics
to a statsd server for graphite consumption
"""
from subprocess import Popen, PIPE, check_call
from socket import socket, AF_INET, SOCK_DGRAM
import re
import os
try:
import json
json # workaround for pyflakes issue #13
except ImportError:
import simplejson as json
class OpenStackSwiftStatisticsCollector(object):
def __init__(self):
'''Setup some initial values defined by chef'''
self.statsd_host = '<%= node[:swift][:statistics][:statsd_host] %>'
self.statsd_port = <%= node[:swift][:statistics][:statsd_port] %>
self.statsd_prefix = '<%= node[:swift][:statistics][:statsd_prefix] %>'
<% if node[:swift][:statistics][:enable_dispersion_report] -%>
self.enable_dispersion_report = True
<% else %>
self.enable_dispersion_report = False
<% end %>
<% if node[:swift][:statistics][:enable_recon_report] -%>
self.enable_recon_report = True
<% else %>
self.enable_recon_report = False
<% end %>
<% if node[:swift][:statistics][:enable_disk_report] -%>
self.enable_disk_report = True
<% else %>
self.enable_disk_report = False
<% end %>
self.recon_account_cache = '<%= node[:swift][:statistics][:recon_account_cache] %>'
self.recon_container_cache = '<%= node[:swift][:statistics][:recon_container_cache] %>'
self.recon_object_cache = '<%= node[:swift][:statistics][:recon_object_cache] %>'
def _dispersion_report(self):
"""
Swift Dispersion Report Collection
"""
p = Popen(['/usr/bin/swift-dispersion-report', '-j'],
stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
self.publish('%s.dispersion.errors' % self.statsd_prefix, len(stderr.split('\n')) - 1)
data = json.loads(stdout)
for t in ('object', 'container'):
for (k, v) in data[t].items():
self.publish('%s.dispersion.%s.%s' % (self.statsd_prefix, t, k), v)
def _recon_report(self):
"""
Swift Recon Collection
"""
recon_cache = {'account': self.recon_account_cache,
'container': self.recon_container_cache,
'object': self.recon_object_cache}
for recon_type in recon_cache:
if not os.access(recon_cache[recon_type], os.R_OK):
continue
try:
f = open(recon_cache[recon_type])
try:
rmetrics = json.loads(f.readlines()[0].strip())
metrics = self._process_cache(rmetrics)
for k, v in metrics:
metric_name = '%s.%s.%s' % (self.statsd_prefix, recon_type, ".".join(k))
if isinstance(v, (int, float)):
self.publish(metric_name, v)
except (ValueError, IndexError):
continue
finally:
f.close()
def _disk_report(self):
"""
Swift Disk Capacity Report
"""
p = Popen(['/usr/bin/swift-recon', '-d'],
stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
used, total = 0, 0
match = re.search(r'.* space used: ([0-9]*\.?[0-9]+) of ([0-9]*\.?[0-9]+)', stdout, re.M|re.I)
if match:
used, total = [int(i) for i in match.groups()]
highest, avg = 0, 0
match = re.search(r'.* lowest:.+highest: ([0-9]*\.?[0-9]+)%, avg: ([0-9]*\.?[0-9]+)%', stdout, re.M|re.I)
if match:
highest, avg = match.groups()
self.publish('%s.capacity.bytes_used' % self.statsd_prefix, used)
self.publish('%s.capacity.bytes_free' % self.statsd_prefix, total-used)
self.publish('%s.capacity.bytes_utilization' % self.statsd_prefix, int((used/total)*100))
self.publish('%s.capacity.single_disk_utilization_highest' % self.statsd_prefix, highest)
self.publish('%s.capacity.single_disk_utilization_average' % self.statsd_prefix, avg)
def collect(self):
if (self.enable_dispersion_report):
self._dispersion_report()
if (self.enable_recon_report):
self._recon_report()
if (self.enable_disk_report):
self._disk_report()
def publish(self, metric_name, value):
"""Publish a metric to statsd server"""
# TODO: IPv6 support
print '%s:%s|g' % (metric_name.encode('utf-8'), value), (self.statsd_host, self.statsd_port)
udp_sock = socket(AF_INET, SOCK_DGRAM)
udp_sock.sendto('%s:%s|g' % (metric_name.encode('utf-8'), value), (self.statsd_host, self.statsd_port))
def _process_cache(self, d, path=()):
"""Recusively walk a nested recon cache dict to obtain path/values"""
metrics = []
for k, v in d.iteritems():
if not isinstance(v, dict):
metrics.append((path + (k,), v))
else:
self._process_cache(v, path + (k,))
return metrics
# Entry point: build a collector and run every enabled report.
if __name__ == '__main__':
    OpenStackSwiftStatisticsCollector().collect()

View File

@@ -66,6 +66,13 @@ rabbitmq_user "add openstack rabbit user" do
action :add
end
rabbitmq_user "change the password of the openstack rabbit user" do
user user
password pass
action :change_password
end
rabbitmq_vhost "add openstack rabbit vhost" do
vhost vhost

View File

@@ -0,0 +1 @@
metadata

View File

@@ -0,0 +1,28 @@
{
"sources": {
"statsd": {
"path": "."
},
"build-essential": {
"locked_version": "1.4.2"
},
"git": {
"locked_version": "2.6.0"
},
"dmg": {
"locked_version": "2.0.0"
},
"yum": {
"locked_version": "2.3.2"
},
"windows": {
"locked_version": "1.10.0"
},
"chef_handler": {
"locked_version": "1.1.4"
},
"runit": {
"locked_version": "1.2.0"
}
}
}

View File

@@ -0,0 +1,8 @@
# Development and test dependencies for this cookbook, managed with Bundler.
source "https://rubygems.org"
gem "chef", "~> 11.4.4"
gem "json", "<= 1.7.7" # chef dependency
gem "berkshelf", "~> 2.0.10"
gem "chefspec", "~> 1.2.0"
gem "foodcritic"  # Chef cookbook lint tool
gem "strainer"    # runs the cookbook's test/lint suite

View File

@@ -0,0 +1,212 @@
GEM
remote: https://rubygems.org/
specs:
activesupport (3.2.14)
i18n (~> 0.6, >= 0.6.4)
multi_json (~> 1.0)
addressable (2.3.5)
akami (1.2.0)
gyoku (>= 0.4.0)
nokogiri (>= 1.4.0)
berkshelf (2.0.10)
activesupport (~> 3.2.0)
addressable (~> 2.3.4)
buff-shell_out (~> 0.1)
chozo (>= 0.6.1)
faraday (>= 0.8.5)
hashie (>= 2.0.2)
minitar (~> 0.5.4)
rbzip2 (~> 0.2.0)
retryable (~> 1.3.3)
ridley (~> 1.5.0)
solve (>= 0.5.0)
thor (~> 0.18.0)
buff-config (0.4.0)
buff-extensions (~> 0.3)
varia_model (~> 0.1)
buff-extensions (0.5.0)
buff-ignore (1.1.0)
buff-platform (0.1.0)
buff-ruby_engine (0.1.0)
buff-shell_out (0.1.0)
buff-ruby_engine (~> 0.1.0)
builder (3.2.2)
celluloid (0.14.1)
timers (>= 1.0.0)
celluloid-io (0.14.1)
celluloid (>= 0.14.1)
nio4r (>= 0.4.5)
chef (11.4.4)
erubis
highline (>= 1.6.9)
json (>= 1.4.4, <= 1.7.7)
mixlib-authentication (>= 1.3.0)
mixlib-cli (~> 1.3.0)
mixlib-config (>= 1.1.2)
mixlib-log (>= 1.3.0)
mixlib-shellout
net-ssh (~> 2.6)
net-ssh-multi (~> 1.1.0)
ohai (>= 0.6.0)
rest-client (>= 1.0.4, < 1.7.0)
yajl-ruby (~> 1.1)
chefspec (1.2.0)
chef (>= 10.0)
erubis
fauxhai (>= 0.1.1, < 2.0)
minitest-chef-handler (>= 0.6.0)
rspec (~> 2.0)
chozo (0.6.1)
activesupport (>= 3.2.0)
hashie (>= 2.0.2)
multi_json (>= 1.3.0)
ci_reporter (1.9.0)
builder (>= 2.1.2)
diff-lcs (1.2.4)
erubis (2.7.0)
faraday (0.8.8)
multipart-post (~> 1.2.0)
fauxhai (1.1.1)
httparty
net-ssh
ohai
ffi (1.9.0)
foodcritic (2.2.0)
erubis
gherkin (~> 2.11.7)
nokogiri (~> 1.5.4)
treetop (~> 1.4.10)
yajl-ruby (~> 1.1.0)
gherkin (2.11.8)
multi_json (~> 1.3)
gssapi (1.0.3)
ffi (>= 1.0.1)
gyoku (1.1.0)
builder (>= 2.1.2)
hashie (2.0.5)
highline (1.6.19)
httparty (0.11.0)
multi_json (~> 1.0)
multi_xml (>= 0.5.2)
httpclient (2.2.0.2)
httpi (0.9.7)
rack
i18n (0.6.5)
ipaddress (0.8.0)
json (1.7.7)
little-plugger (1.1.3)
logging (1.6.2)
little-plugger (>= 1.1.3)
mime-types (1.25)
minitar (0.5.4)
minitest (4.7.5)
minitest-chef-handler (1.0.1)
chef
ci_reporter
minitest (~> 4.7.3)
mixlib-authentication (1.3.0)
mixlib-log
mixlib-cli (1.3.0)
mixlib-config (1.1.2)
mixlib-log (1.6.0)
mixlib-shellout (1.2.0)
multi_json (1.7.9)
multi_xml (0.5.5)
multipart-post (1.2.0)
net-http-persistent (2.9)
net-ssh (2.6.8)
net-ssh-gateway (1.2.0)
net-ssh (>= 2.6.5)
net-ssh-multi (1.1)
net-ssh (>= 2.1.4)
net-ssh-gateway (>= 0.99.0)
nio4r (0.5.0)
nokogiri (1.5.10)
nori (1.1.5)
ohai (6.18.0)
ipaddress
mixlib-cli
mixlib-config
mixlib-log
mixlib-shellout
systemu
yajl-ruby
polyglot (0.3.3)
rack (1.5.2)
rbzip2 (0.2.0)
rest-client (1.6.7)
mime-types (>= 1.16)
retryable (1.3.3)
ridley (1.5.2)
addressable
buff-config (~> 0.2)
buff-extensions (~> 0.3)
buff-ignore (~> 1.1)
buff-shell_out (~> 0.1)
celluloid (~> 0.14.0)
celluloid-io (~> 0.14.0)
erubis
faraday (>= 0.8.4)
hashie (>= 2.0.2)
json (>= 1.7.7)
mixlib-authentication (>= 1.3.0)
net-http-persistent (>= 2.8)
net-ssh
nio4r (>= 0.5.0)
retryable
solve (>= 0.4.4)
varia_model (~> 0.1)
winrm (~> 1.1.0)
rspec (2.14.1)
rspec-core (~> 2.14.0)
rspec-expectations (~> 2.14.0)
rspec-mocks (~> 2.14.0)
rspec-core (2.14.5)
rspec-expectations (2.14.2)
diff-lcs (>= 1.1.3, < 2.0)
rspec-mocks (2.14.3)
rubyntlm (0.1.1)
savon (0.9.5)
akami (~> 1.0)
builder (>= 2.1.2)
gyoku (>= 0.4.0)
httpi (~> 0.9)
nokogiri (>= 1.4.0)
nori (~> 1.0)
wasabi (~> 1.0)
solve (0.8.1)
strainer (3.3.0)
berkshelf (~> 2.0)
buff-platform (~> 0.1)
systemu (2.5.2)
thor (0.18.1)
timers (1.1.0)
treetop (1.4.15)
polyglot
polyglot (>= 0.3.1)
uuidtools (2.1.4)
varia_model (0.2.0)
buff-extensions (~> 0.2)
hashie (>= 2.0.2)
wasabi (1.0.0)
nokogiri (>= 1.4.0)
winrm (1.1.2)
gssapi (~> 1.0.0)
httpclient (~> 2.2.0.2)
logging (~> 1.6.1)
nokogiri (~> 1.5.0)
rubyntlm (~> 0.1.1)
savon (= 0.9.5)
uuidtools (~> 2.1.2)
yajl-ruby (1.1.0)
PLATFORMS
ruby
DEPENDENCIES
berkshelf (~> 2.0.10)
chef (~> 11.4.4)
chefspec (~> 1.2.0)
foodcritic
json (<= 1.7.7)
strainer

View File

@@ -1,69 +1,53 @@
# DESCRIPTION
Description
===========
Chef cookbook to install [Etsy's
StatsD](https://github.com/etsy/statsd) daemon. Supports the new
pluggable backend modules.
Installs and sets up statsd <http://github.com/etsy/statsd>
# REQUIREMENTS
Requirements
============
Depends on the cookbooks:
Ubuntu 12.04
* git
* nodejs
Attributes
==========
# ATTRIBUTES
* `node['statsd']['port']` - The port for Statsd to listen for stats on. Defaults to 8125
* `node['statsd']['graphite_host']` - The host to forward processed statistics to. Defaults to localhost.
* `node['statsd']['graphite_port']` - The port to forward processed statistics to. Defaults to 2003
* `node['statsd']['package_version']` - The version to use when creating the package. Defaults to 0.6.0
* `node['statsd']['tmp_dir']` - The temporary directory to use while building the package. Defaults to /tmp
* `node['statsd']['repo']` - The git repo to use. Defaults to "git://github.com/etsy/statsd.git"
* `node['statsd']['sha']` - The sha checksum of the repo to use
## Basic attributes
Usage
=====
* `repo`: Location of statsd repo (defaults to Etsy's).
* `log_file`: Where to log output (defaults to:
`/var/log/statsd.log`).
* `flush_interval_msecs`: Flush interval in msecs (default 10000).
* `port`: Port to listen for UDP stats (default 8125).
Including this recipe will build a dpkg from the statsd git repository and install it.
## Graphite settings
By default statsd will attempt to send statistics to a graphite instance running on localhost.
* `graphite_enabled`: Enable the built-in Graphite backend (default true).
* `graphite_port`: Port to talk to Graphite on (default 2003).
* `graphite_host`: Host name of Graphite server (default localhost).
Testing
=======
## Adding backends
$ bundle install
$ bundle exec berks install
$ bundle exec strainer test
Set the attribute `backends` to a hash of statsd NPM module
backends. The hash key is the name of the NPM module, while the hash
value is the version of the NPM module to install (or null for latest
version).
License and Author
==================
For example, to use version 0.0.1 of [statsd-librato-backend][]:
Author:: Scott Lampert (<sl724q@att.com>)
attrs[:statsd][:backends] = { 'statsd-librato-backend' => '0.0.1' }
Copyright 2012-2013, AT&T Services, Inc.
To use the latest version of statsd-librato-backend:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
attrs[:statsd][:backends] = { 'statsd-librato-backend' => nil }
http://www.apache.org/licenses/LICENSE-2.0
The cookbook will install each backend module under the statsd
directory and add it to the list of backends loaded in the
configuration file.
### Extra backend configuration
Set the attribute `extra_config` to any additional configuration
options that should be included in the StatsD configuration file.
For example, to set your email and token for the
[statsd-librato-backend][] backend module, use the following:
```js
attrs[:statsd][:extra_config] => {
'librato' => {
'email' => 'myemail@example.com',
'token' => '1234567890ABCDEF'
}
}
```
# USAGE
[statsd-librato-backend]: https://github.com/librato/statsd-librato-backend
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,4 @@
# Strainerfile
knife test: bundle exec knife cookbook test $COOKBOOK
foodcritic: bundle exec foodcritic -f any -t ~FC003 -t ~FC023 $SANDBOX/$COOKBOOK
chefspec: bundle exec rspec $SANDBOX/$COOKBOOK

View File

@@ -1,32 +1,9 @@
default[:statsd][:repo] = "git://github.com/etsy/statsd.git"
default[:statsd][:revision] = "master"
default[:statsd][:log_file] = "/var/log/statsd.log"
default[:statsd][:flush_interval_msecs] = 10000
default[:statsd][:port] = 8125
# Is the graphite backend enabled?
default[:statsd][:graphite_enabled] = true
default[:statsd][:graphite_port] = 2003
default[:statsd][:graphite_host] = "localhost"
#
# Add all NPM module backends here. Each backend should be a
# hash of the backend's name to the NPM module's version. If we
# should just use the latest, set the hash to null.
#
# For example, to use version 0.0.1 of statsd-librato-backend:
#
# attrs[:statsd][:backends] = { 'statsd-librato-backend' => '0.0.1' }
#
# To use the latest version of statsd-librato-backend:
#
# attrs[:statsd][:backends] = { 'statsd-librato-backend' => nil }
#
default[:statsd][:backends] = {}
#
# Add any additional backend configuration here.
#
default[:statsd][:extra_config] = {}
default['statsd']['port'] = 8125
default['statsd']['graphite_port'] = 2003
default['statsd']['graphite_host'] = "localhost"
default['statsd']['relay_server'] = false
default['statsd']['package_version'] = "0.6.0"
default['statsd']['sha'] = "2ccde8266bbe941ac5f79efe39103b99e1196d92"
default['statsd']['user'] = "statsd"
default['statsd']['repo'] = "git://github.com/etsy/statsd.git"
default['statsd']['tmp_dir'] = "/tmp"

View File

@@ -1,12 +1,8 @@
description "statsd"
author "Librato"
author "etsy"
start on runlevel [2345]
stop on runlevel [!2345]
env SL_NAME=statsd
respawn
start on startup
stop on shutdown
script
# We found $HOME is needed. Without it, we ran into problems

View File

@@ -0,0 +1,3 @@
#!/bin/sh
# Called by Upstart, /etc/init/statsd.conf
node /usr/share/statsd/stats.js /etc/statsd/localConfig.js 2>&1 >> /tmp/statsd.log

View File

@@ -1,12 +1,16 @@
maintainer "Mike Heffner"
maintainer_email "mike@librato.com"
name "statsd"
maintainer "AT&T Services, Inc."
maintainer_email "cookbooks@lists.tfoundry.com"
license "Apache 2.0"
description "Installs/Configures statsd"
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
version "0.1.1"
version "0.1.4"
recipe "statsd", "Installs stats ruby gem"
recipe "statsd::server", "Configures statsd server"
depends "build-essential"
depends "git"
depends "nodejs", ">= 0.5.2"
%w{ ubuntu }.each do |os|
supports os
end
supports "ubuntu"
depends "build-essential"
depends "git"

View File

@@ -2,109 +2,19 @@
# Cookbook Name:: statsd
# Recipe:: default
#
# Copyright 2011, Librato, Inc.
# Copyright 2013, Scott Lampert
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include_recipe "nodejs"
include_recipe "git"
git "/usr/share/statsd" do
repository node[:statsd][:repo]
revision node[:statsd][:revision]
action :sync
end
execute "install dependencies" do
command "npm install -d"
cwd "/usr/share/statsd"
end
backends = []
if node[:statsd][:graphite_enabled]
backends << "./backends/graphite"
end
node[:statsd][:backends].each do |k, v|
if v
name = "#{k}@#{v}"
else
name= k
end
execute "install npm module #{name}" do
command "npm install #{name}"
cwd "/usr/share/statsd"
end
backends << k
end
directory "/etc/statsd" do
action :create
end
user "statsd" do
comment "statsd"
system true
shell "/bin/false"
end
service "statsd" do
provider Chef::Provider::Service::Upstart
restart_command "stop statsd; start statsd"
start_command "start statsd"
stop_command "stop statsd"
supports :restart => true, :start => true, :stop => true
end
template "/etc/statsd/config.js" do
source "config.js.erb"
mode 0644
config_hash = {
:flushInterval => node[:statsd][:flush_interval_msecs],
:port => node[:statsd][:port],
:backends => backends
}.merge(node[:statsd][:extra_config])
if node[:statsd][:graphite_enabled]
config_hash[:graphitePort] = node[:statsd][:graphite_port]
config_hash[:graphiteHost] = node[:statsd][:graphite_host]
end
variables(:config_hash => config_hash)
notifies :restart, resources(:service => "statsd")
end
directory "/usr/share/statsd/scripts" do
action :create
end
template "/usr/share/statsd/scripts/start" do
source "upstart.start.erb"
mode 0755
notifies :restart, resources(:service => "statsd")
end
cookbook_file "/etc/init/statsd.conf" do
source "upstart.conf"
mode 0644
notifies :restart, resources(:service => "statsd")
end
bash "create_log_file" do
code <<EOH
touch #{node[:statsd][:log_file]} && chown statsd #{node[:statsd][:log_file]}
EOH
not_if {File.exist?(node[:statsd][:log_file])}
end
service "statsd" do
action [ :enable, :start ]
end
gem_package "statsd-ruby"

View File

@@ -0,0 +1,87 @@
#
# Cookbook Name:: statsd
# Recipe:: server
#
# Copyright 2013, Scott Lampert
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include_recipe "build-essential"
include_recipe "git"
case node["platform"]
when "ubuntu", "debian"
package "nodejs"
package "debhelper"
statsd_version = node['statsd']['sha']
git ::File.join(node['statsd']['tmp_dir'], "statsd") do
repository node['statsd']['repo']
reference statsd_version
action :sync
notifies :run, "execute[build debian package]"
end
# Fix the debian changelog file of the repo
template ::File.join(node['statsd']['tmp_dir'], "statsd/debian/changelog") do
source "changelog.erb"
end
execute "build debian package" do
command "dpkg-buildpackage -us -uc"
cwd ::File.join(node['statsd']['tmp_dir'], "statsd")
creates ::File.join(node['statsd']['tmp_dir'], "statsd_#{node['statsd']['package_version']}_all.deb")
end
dpkg_package "statsd" do
action :install
source ::File.join(node['statsd']['tmp_dir'], "statsd_#{node['statsd']['package_version']}_all.deb")
end
when "redhat", "centos"
raise "No support for RedHat or CentOS (yet)."
end
template "/etc/statsd/localConfig.js" do
source "localConfig.js.erb"
mode 00644
notifies :restart, "service[statsd]"
end
cookbook_file "/usr/share/statsd/scripts/start" do
source "upstart.start"
owner "root"
group "root"
mode 00755
end
cookbook_file "/etc/init/statsd.conf" do
source "upstart.conf"
owner "root"
group "root"
mode 00644
end
user node['statsd']['user'] do
comment "statsd"
system true
shell "/bin/false"
end
service "statsd" do
provider Chef::Provider::Service::Upstart
action [ :enable, :start ]
end

View File

@@ -0,0 +1,5 @@
statsd (<%= node['statsd']['package_version'] %>) unstable; urgency=low
* Dummy changelog for dpkg build
-- Scott Lampert <scott@lampert.org> Thu, 14 Mar 2013 15:24:00 -0700

View File

@@ -1 +0,0 @@
<%= JSON.pretty_generate(@config_hash) %>

View File

@@ -0,0 +1,17 @@
/********************
AUTOGENERATED BY CHEF
*********************/
{
graphitePort: <%= node['statsd']['graphite_port'] %>
, graphiteHost: "<%= node['statsd']['graphite_host'] %>"
<% if node['statsd']['relay_server'] -%>
, address: "127.0.0.1"
, mgmt_address: "127.0.0.1"
<% else -%>
, address: "<%= node['statsd']['graphite_host'] %>"
, mgmt_address: "<%=node['statsd']['graphite_host'] %>"
<% end -%>
, port: <%= node['statsd']['port'] %>
, backends: [ "./backends/graphite" ]
}

View File

@@ -1,6 +0,0 @@
#!/bin/bash
# Called by Upstart, /etc/init/statsd.conf
export PATH=$PATH:/usr/local/bin
node /usr/share/statsd/stats.js /etc/statsd/config.js 2>&1 >> <%= node[:statsd][:log_file] %>

View File

@@ -0,0 +1,4 @@
{
"id": "ceilometer",
"ceilometer": "xcatcloud"
}

View File

@@ -0,0 +1,4 @@
{
"id": "cinder",
"cinder": "xcatcloud"
}

View File

@@ -0,0 +1,4 @@
{
"id": "glance",
"glance": "xcatcloud"
}

View File

@@ -0,0 +1,4 @@
{
"id": "horizon",
"horizon": "xcatcloud"
}

View File

@@ -0,0 +1,4 @@
{
"id": "keystone",
"keystone": "xcatcloud"
}

View File

@@ -0,0 +1,4 @@
{
"id": "nova",
"nova": "xcatcloud"
}

View File

@@ -0,0 +1,4 @@
{
"id": "quantum",
"quantum": "xcatcloud"
}

View File

@@ -0,0 +1,4 @@
{
"id": "openstack_identity_bootstrap_token",
"openstack_identity_bootstrap_token": "openstack_identity_bootstrap_token"
}

View File

@@ -0,0 +1,4 @@
{
"id": "quantum_metadata_secret",
"quantum_metadata_secret": "quantum_metadata_secret"
}

View File

@@ -0,0 +1,4 @@
{
"id": "openstack-block-storage",
"openstack-block-storage": "xcatcloud"
}

View File

@@ -0,0 +1,4 @@
{
"id": "openstack-compute",
"openstack-compute": "xcatcloud"
}

View File

@@ -0,0 +1,4 @@
{
"id": "openstack-image",
"openstack-image": "xcatcloud"
}

View File

@@ -0,0 +1,4 @@
{
"id": "openstack-network",
"openstack-network": "xcatcloud"
}

View File

@@ -0,0 +1,4 @@
{
"id": "admin",
"admin": "xcatcloud"
}

View File

@@ -0,0 +1,4 @@
{
"id": "guest",
"guest": "xcatcloud"
}

View File

@@ -0,0 +1,161 @@
#
# IBM(c) 2013 EPL license http://www.eclipse.org/legal/epl-v10.html
#
name "example_allinone"
description "Grizzly allinone environment file."
override_attributes(
"mysql" => {
"server_root_password" => "cluster",
"server_debian_password" => "cluster",
"server_repl_password" => "cluster",
"allow_remote_root" => true,
"root_network_acl" => "%"
},
"openstack" => {
"developer_mode" => true,
"secret"=>{
"key_path"=>"/etc/chef/encrypted_data_bag_secret"
},
"db"=>{
"bind_interface"=>"lo",
"compute"=>{
"host"=>"127.0.0.1"
},
"identity"=>{
"host"=>"127.0.0.1"
},
"image"=>{
"host"=>"127.0.0.1"
},
"network"=>{
"host"=>"127.0.0.1"
},
"volume"=>{
"host"=>"127.0.0.1"
},
"dashboard"=>{
"host"=>"127.0.0.1"
},
"metering"=>{
"host"=>"127.0.0.1"
}
},
"mq"=>{
"bind_interface"=>"lo"
},
"identity"=>{
"bind_interface"=>"lo",
"db"=>{
"username"=>"keystone",
"password"=> "keystone"
}
},
"endpoints"=>{
"identity-api"=>{
"host"=>"127.0.0.1",
},
"identity-admin"=>{
"host"=>"127.0.0.1",
},
"compute-api"=>{
"host"=>"127.0.0.1",
},
"compute-ec2-api"=>{
"host"=>"127.0.0.1",
},
"compute-ec2-admin"=>{
"host"=>"127.0.0.1",
},
"compute-xvpvnc"=>{
"host"=>"127.0.0.1",
},
"compute-novnc"=>{
"host"=>"127.0.0.1",
},
"network-api"=>{
"host"=>"127.0.0.1",
},
"image-api"=>{
"host"=>"127.0.0.1",
},
"image-registry"=>{
"host"=>"127.0.0.1",
},
"volume-api"=>{
"host"=>"127.0.0.1",
},
"metering-api"=>{
"host"=>"127.0.0.1",
}
},
"image" => {
"api"=>{
"bind_interface"=>"lo"
},
"registry"=>{
"bind_interface"=>"lo"
},
"image_upload" => false,
"upload_images" => ["cirros"],
"upload_image" => {
"cirros" => "https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img"
},
"identity_service_chef_role" => "allinone-compute"
},
"block-storage" => {
"keystone_service_chef_role" => "allinone-compute"
},
"dashboard" => {
"keystone_service_chef_role" => "allinone-compute",
"use_ssl" => "false"
},
"network" => {
"metadata"=>{
"nova_metadata_ip"=>"127.0.0.1"
},
"rabbit"=>{
"host"=>"127.0.0.1"
},
"api"=>{
"bind_interface"=>"lo"
},
"rabbit_server_chef_role" => "allinone-compute",
"l3"=>{
"external_network_bridge_interface"=>"eth0"
},
"openvswitch"=> {
"tenant_network_type"=>"vlan",
"network_vlan_ranges"=>"physnet1",
"bridge_mappings"=>"physnet1:eth2"
}
},
"compute" => {
"identity_service_chef_role" => "allinone-compute",
"rabbit"=>{
"host"=>"127.0.0.1"
},
"xvpvnc_proxy"=>{
"bind_interface"=>"eth0"
},
"novnc_proxy"=>{
"bind_interface"=>"eth0"
},
"network" => {
"service_type" => "quantum"
},
"config" => {
"ram_allocation_ratio" => 5.0
},
"libvirt" => {
"bind_interface"=>"lo",
"virt_type" => "qemu"
}
}
}
)

View File

@@ -0,0 +1,156 @@
#
# IBM(c) 2013 EPL license http://www.eclipse.org/legal/epl-v10.html
# http://docs.openstack.org/grizzly/openstack-network/admin/content/app_demo_routers_with_private_networks.html
#
#
name "example_per-tenant_router"
description "Grizzly environment file based on Per-tenant Routers with Private Networks"
override_attributes(
"mysql" => {
"server_root_password" => "cluster",
"server_debian_password" => "cluster",
"server_repl_password" => "cluster",
"allow_remote_root" => true,
"root_network_acl" => "%"
},
"openstack" => {
"developer_mode" => true,
"db"=>{
"bind_interface"=>"eth1",
"compute"=>{
"host"=>"11.1.0.107"
},
"identity"=>{
"host"=>"11.1.0.107"
},
"image"=>{
"host"=>"11.1.0.107"
},
"network"=>{
"host"=>"11.1.0.107"
},
"volume"=>{
"host"=>"11.1.0.107"
},
"dashboard"=>{
"host"=>"11.1.0.107"
},
"metering"=>{
"host"=>"11.1.0.107"
}
},
"mq"=>{
"bind_interface"=>"eth1"
},
"identity"=>{
"bind_interface"=>"eth1",
"db"=>{
"username"=>"keystone",
"password"=> "keystone"
}
},
"endpoints"=>{
"identity-api"=>{
"host"=>"11.1.0.107",
},
"identity-admin"=>{
"host"=>"11.1.0.107",
},
"compute-api"=>{
"host"=>"11.1.0.107",
},
"compute-ec2-api"=>{
"host"=>"11.1.0.107",
},
"compute-ec2-admin"=>{
"host"=>"11.1.0.107",
},
"compute-xvpvnc"=>{
"host"=>"11.1.0.107",
},
"compute-novnc"=>{
"host"=>"11.1.0.107",
},
"network-api"=>{
"host"=>"11.1.0.107",
},
"image-api"=>{
"host"=>"11.1.0.107",
},
"image-registry"=>{
"host"=>"11.1.0.107",
},
"volume-api"=>{
"host"=>"11.1.0.107",
},
"metering-api"=>{
"host"=>"11.1.0.107",
}
},
"image" => {
"api"=>{
"bind_interface"=>"eth1"
},
"registry"=>{
"bind_interface"=>"eth1"
},
"image_upload" => false,
"upload_images" => ["cirros"],
"upload_image" => {
"cirros" => "https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img"
},
},
"dashboard" => {
"use_ssl" => "false"
},
"network" => {
"metadata"=>{
"nova_metadata_ip"=>"11.1.0.107"
},
"rabbit"=>{
"host"=>"11.1.0.107"
},
"api"=>{
"bind_interface"=>"eth1"
},
"l3"=>{
"external_network_bridge_interface"=>"eth0"
},
"allow_overlapping_ips" => "True",
"use_namespaces" => "True",
"openvswitch"=> {
"tenant_network_type"=>"gre",
"tunnel_id_ranges"=>"1:1000",
"enable_tunneling"=>"True",
"local_ip_interface"=>"eth2"
}
},
"compute" => {
"identity_service_chef_role" => "os-compute-single-controller",
"rabbit"=>{
"host"=>"11.1.0.107"
},
"xvpvnc_proxy"=>{
"bind_interface"=>"eth0"
},
"novnc_proxy"=>{
"bind_interface"=>"eth0"
},
"network" => {
"service_type" => "quantum",
},
"config" => {
"ram_allocation_ratio" => 5.0
},
"libvirt" => {
"bind_interface"=>"eth1",
"virt_type" => "qemu"
}
}
}
)

View File

@@ -0,0 +1,6 @@
name "os-block-storage-volume"
description "OpenStack Block Storage volume service"
run_list(
"role[os-base]",
"recipe[openstack-block-storage::volume]"
)

View File

@@ -2,5 +2,7 @@ name "os-block-storage"
description "Configures OpenStack block storage, configured by attributes."
run_list(
"role[os-base]",
"recipe[openstack-block-storage]"
"role[os-block-storage-api]",
"role[os-block-storage-scheduler]",
"role[os-block-storage-volume]",
)

View File

@@ -6,6 +6,7 @@ run_list(
"role[os-ops-messaging]",
"role[os-identity]",
"role[os-network-server]",
"role[os-network-dhcp-agent]",
"role[os-compute-scheduler]",
"role[os-compute-api]",
"role[os-compute-cert]",

View File

@@ -2,5 +2,5 @@ name "os-object-storage-account"
description "OpenStack object storage account service"
run_list(
"role[os-base]",
"recipe[openstack-object-storage::account]"
"recipe[openstack-object-storage::account-server]"
)

View File

@@ -2,5 +2,5 @@ name "os-object-storage-container"
description "OpenStack object storage container service"
run_list(
"role[os-base]",
"recipe[openstack-object-storage::container]"
"recipe[openstack-object-storage::container-server]"
)

View File

@@ -2,5 +2,5 @@ name "os-object-storage-management"
description "OpenStack object storage management service"
run_list(
"role[os-base]",
"recipe[openstack-object-storage::management]"
"recipe[openstack-object-storage::management-server]"
)

View File

@@ -2,5 +2,5 @@ name "os-object-storage-object"
description "OpenStack object storage object service"
run_list(
"role[os-base]",
"recipe[openstack-object-storage::object]"
"recipe[openstack-object-storage::object-server]"
)

View File

@@ -2,5 +2,5 @@ name "os-object-storage-proxy"
description "OpenStack object storage proxy service"
run_list(
"role[os-base]",
"recipe[openstack-object-storage::proxy]"
"recipe[openstack-object-storage::proxy-server]"
)

View File

@@ -0,0 +1,6 @@
name "os-object-storage-setup"
description "OpenStack object storage server responsible for generating initial settings"
run_list(
"role[os-base]",
"recipe[openstack-object-storage::setup]"
)

View File

@@ -2,5 +2,10 @@ name "os-object-storage"
description "OpenStack object storage roll-up role"
run_list(
"role[os-base]",
"recipe[openstack-object-storage]"
"role[os-object-storage-setup]",
"role[os-object-storage-management]",
"role[os-object-storage-proxy]",
"role[os-object-storage-object]",
"role[os-object-storage-container]",
"role[os-object-storage-account]"
)

View File

@@ -179,6 +179,10 @@ sub process_request
$callback->($rsp);
next;
}
unless ( -d "$repos/environments") {
mkdir("$repos/environments", 0777);
}
my $tmperr = cloudvars(
$tmplfile,

View File

@@ -1,4 +1,4 @@
#!/bin/sh -vx
#!/bin/sh
bridge_name="br-ex"
@@ -19,7 +19,7 @@ str_value=$(hashget hash_defined_nics $pubinterface)
old_ifs=$IFS
IFS=$','
array_temp=($str_value)
FS=$old_ifs
IFS=$old_ifs
if [ -n "${array_temp[1]}" ];then
str_nic_type=`echo ${array_temp[1]} | tr "[A-Z]" "[a-z]"`
@@ -35,7 +35,27 @@ else
fi
configeth $bridge_name ${array_temp[0]} ${array_temp[2]}
str_network=$(checknetwork ${array_temp[0]})
if [ -z "$str_network" ];then
logger -t xcat -p local4.info "configbr-ex: could not find the network for $bridge_name which is based on $pubinterface. Please check the networks and nics tables."
echo "configbr-ex: could not find the network for $bridge_name which is based on $pubinterface. Please check the networks and nics tables."
exit -1
fi
#configeth $bridge_name ${array_temp[0]} ${array_temp[2]}
configeth $bridge_name ${array_temp[0]} $str_network
if [ $? -ne 0 ];then
logger -t xcat -p local4.info "configbr-ex failed to configure $bridge_name : configeth $bridge_name ${array_temp[0]} $str_network"
echo "confignics: configbr-ex failed to configure $bridge_name : configeth $bridge_name ${array_temp[0]} $str_network"
exit -1
fi
. ./configgw $bridge_name
if [ $? -ne 0 ];then
logger -t xcat -p local4.info "configgw failed to configure gateway for $bridge_name."
echo "configgw failed to configure gateway for $bridge_name."
exit -1
fi
exit 0

View File

@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/sh
# IBM(c) 2007 EPL license http://www.eclipse.org/legal/epl-v10.html
# This script, ("loadclouddata"), is a sample xCAT post script for
@@ -38,17 +38,20 @@ hkeys() {
set | grep -o "^HASH${1}[[:alnum:]]*=" | sed -re "s/^HASH${1}(.*)=/\\1/g"
}
HOME='/root/'
export HOME
#flags
no_args=0
run_all=0
only_load_cookbook=0
only_load_role=0
only_load_clouddata=0
# develop mode. 0 -- false(customer mode); 1 -- true(develop mode)
devmode=1
if [ $# -eq 0 ]
then
no_args=1
run_all=1
else
for arg in "$@"
do
@@ -61,15 +64,30 @@ else
elif [ "$arg" = "--clouddata" ]
then
only_load_clouddata=1
elif [ "$arg" = "--nodevmode" ]
then
devmode=0
run_all=1
else
errmsg="no argument $arg in the loadchefdata script"
logger -t xcat -p local4.err $errmsg
echo $errmsg
logger -t xcat -p local4.err "$errmsg"
echo "$errmsg"
exit 1
fi
done
fi
if [ $devmode -eq 0 ]
then
if [ $only_load_cookbook -eq 1 -o $only_load_role -eq 1 -o $only_load_clouddata -eq 1 ]
then
errmsg="'--nodevmode' could not be used with other arguments"
logger -t xcat -p local4.err "$errmsg"
echo "$errmsg"
exit 1
fi
fi
if [ -z $CLOUDLIST ]
then
errmsg="Error! No Cloud name is assigned to the chef-client of the chef-server $NODE. Please check the cloud table."
@@ -89,7 +107,7 @@ then
fi
cd $REPOSITORY
if [ $no_args -eq 1 -o $only_load_cookbook -eq 1 ]
if [ $run_all -eq 1 -o $only_load_cookbook -eq 1 ]
then
# upload coobooks
knife cookbook bulk delete '.*' -y > /dev/null 2>&1
@@ -103,7 +121,7 @@ then
fi
fi
if [ $no_args -eq 1 -o $only_load_role -eq 1 ]
if [ $run_all -eq 1 -o $only_load_role -eq 1 ]
then
# upload roles
knife role bulk delete '.*' -y > /dev/null 2>&1
@@ -119,7 +137,7 @@ then
fi
if [ $no_args -eq 1 -o $only_load_clouddata -eq 1 ]
if [ $run_all -eq 1 -o $only_load_clouddata -eq 1 ]
then
if [ -z $CFGCLIENTLIST ]
@@ -164,7 +182,8 @@ then
for client in $CFGCLIENTLIST
do
echo "Configuring the chef-client node $client on the chef-server $NODE."
c_fullname="$client.$DOMAIN"
#c_fullname="$client.$DOMAIN"
c_fullname=$client
knife client delete -y $c_fullname > /dev/null 2>&1
knife node delete -y $c_fullname > /dev/null 2>&1
@@ -188,11 +207,18 @@ then
exit 1
fi
roles=`echo $roles | sed -e 's/ /,/g'`
r="";
for onerole in $roles
do
r+="role[$onerole],"
done
# assign the role for the chef node
knife node run_list add $c_fullname "role[$roles]"
knife node run_list add $c_fullname "$r"
if [ $? != 0 ]
then
errmsg="Failed to run knife node run_list add $client 'role[$roles]' on the chef-server $NODE."
errmsg="Failed to run knife node run_list add $client '$r' on the chef-server $NODE."
logger -t xcat -p local4.err $errmsg
echo $errmsg
exit 1
@@ -219,10 +245,76 @@ then
done
IFS=$OIFS
fi
IFS=$OIFS
if [ $devmode -eq 0 ]
then
bags=(db_passwords secrets service_passwords user_passwords)
if [ ! -e "$REPOSITORY/databags" ]
then
mkdir -p "$REPOSITORY/databags"
fi
databag_key="$REPOSITORY/databags/openstack_databag_key"
openssl rand -base64 512 > $databag_key
if [ $? != 0 ]
then
errmsg="Failed to use openssl to generate the data bag key on $NODE. Please check whether openssl is installed."
logger -t xcat -p local4.err "$errmsg"
echo "$errmsg"
exit 1
fi
if [ ! -e "/etc/chef/" ]
then
mkdir -p "/etc/chef/"
fi
#for knife command
cp -f $databag_key "/etc/chef/encrypted_data_bag_secret"
#for other chef-client nodes
cp -f $databag_key "/etc/chef-server/encrypted_data_bag_secret"
# add the path of encrypted_data_bag_secret to knife.rb file
if ! grep -w -q 'encrypted_data_bag_secret' /root/.chef/knife.rb
then
echo "encrypted_data_bag_secret '/etc/chef/openstack_encrypted_data_bag_secret'" >> /root/.chef/knife.rb
fi
# delete the old databags
knife data bag list | xargs -i knife data bag delete -y {}
# create databags and upload items
for bag in ${bags[@]}
do
bagpath="$REPOSITORY/databags/$bag"
if [ ! -e "$bagpath" ]
then
errmsg="$bag doesn't exist in $REPOSITORY/databags. Please make sure the databags are in the directory $REPOSITORY/databags."
logger -t xcat -p local4.err "$errmsg"
echo "$errmsg"
exit 1
fi
knife data bag create --secret-file $databag_key $bag
items=$(ls $bagpath)
for item in $items
do
knife data bag from file $bag $REPOSITORY/databags/$bag/$item --secret-file $databag_key
if [ $? != 0 ]
then
errmsg="Failed to run knife data bag from file $bag $REPOSITORY/databags/$bag/$item --secret-file $databag_key"
logger -t xcat -p local4.err "$errmsg"
echo "$errmsg"
exit 1
fi
done
done
fi
exit 0

View File

@@ -105,7 +105,10 @@ override_attributes(
"identity_service_chef_role" => "allinone-compute"
},
"block-storage" => {
"keystone_service_chef_role" => "allinone-compute"
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"keystone_service_chef_role" => "allinone-compute"
},
"dashboard" => {
"keystone_service_chef_role" => "allinone-compute",
@@ -129,7 +132,7 @@ override_attributes(
"openvswitch"=> {
"tenant_network_type"=>"vlan",
"network_vlan_ranges"=>"physnet1",
"bridge_mappings"=>"physnet1:#TABLE:clouds:name=$CLOUD:datainterface#"
"bridge_mappings"=>"physnet1:br-#TABLE:clouds:name=$CLOUD:datainterface#"
}
},
"compute" => {

View File

@@ -0,0 +1,164 @@
#
# IBM(c) 2013 EPL license http://www.eclipse.org/legal/epl-v10.html
#
# Chef environment template for an xCAT "all-in-one" OpenStack (Grizzly)
# deployment: every service role is mapped to the single allinone-compute
# host. Tokens of the form #TABLE:clouds:name=$CLOUD:<column># are
# placeholders filled in from the matching row of the xCAT `clouds` table,
# and $CLOUD is replaced with the cloud name -- presumably by the xCAT
# template processor before upload; verify against the deployment script.
name "$CLOUD"
description "Grizzly allinone environment file."
override_attributes(
# MySQL server credentials; remote root login is allowed from any host ("%").
"mysql" => {
"server_root_password" => "cluster",
"server_debian_password" => "cluster",
"server_repl_password" => "cluster",
"allow_remote_root" => true,
"root_network_acl" => "%"
},
"openstack" => {
# developer_mode false: secrets are read from encrypted data bags,
# decrypted with the key installed at key_path below.
"developer_mode" => false,
"secret"=>{
"key_path"=>"/etc/chef/encrypted_data_bag_secret"
},
# All service databases live on the single all-in-one host.
"db"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#",
"compute"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"identity"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"image"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"network"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"volume"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"dashboard"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"metering"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
}
},
# Message queue binds to the management interface.
"mq"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
# Keystone identity service and its database credentials.
"identity"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#",
"db"=>{
"username"=>"keystone",
"password"=> "keystone"
}
},
# Every API endpoint is served from the all-in-one host IP.
"endpoints"=>{
"identity-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"identity-admin"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-ec2-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-ec2-admin"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-xvpvnc"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-novnc"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"network-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"image-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"image-registry"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"volume-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"metering-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
}
},
# Glance image service; the cirros test image is declared but not uploaded
# automatically (image_upload is false).
"image" => {
"api"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"registry"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"image_upload" => false,
"upload_images" => ["cirros"],
"upload_image" => {
"cirros" => "https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img"
},
"identity_service_chef_role" => "allinone-compute"
},
# Cinder block storage; rabbit host points at the all-in-one node.
"block-storage" => {
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"keystone_service_chef_role" => "allinone-compute"
},
"dashboard" => {
"keystone_service_chef_role" => "allinone-compute",
"use_ssl" => "false"
},
# Networking: VLAN tenant networks; data-interface bridge is named
# "br-<datainterface>" (the "br-" prefix is required by openvswitch).
"network" => {
"metadata"=>{
"nova_metadata_ip"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"api"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"rabbit_server_chef_role" => "allinone-compute",
"l3"=>{
"external_network_bridge_interface"=>"#TABLE:clouds:name=$CLOUD:pubinterface#"
},
"openvswitch"=> {
"tenant_network_type"=>"vlan",
"network_vlan_ranges"=>"physnet1",
"bridge_mappings"=>"physnet1:br-#TABLE:clouds:name=$CLOUD:datainterface#"
}
},
# Nova compute; networking is delegated to quantum, VNC consoles bind to
# the public interface, libvirt virt_type comes from the clouds table.
"compute" => {
"identity_service_chef_role" => "allinone-compute",
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"xvpvnc_proxy"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:pubinterface#"
},
"novnc_proxy"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:pubinterface#"
},
"network" => {
"service_type" => "quantum"
},
"config" => {
# NOTE(review): ram_allocation_ratio 5.0 looks like a 5x RAM
# overcommit factor -- confirm against the nova cookbook.
"ram_allocation_ratio" => 5.0
},
"libvirt" => {
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#",
"virt_type" => "#TABLE:clouds:name=$CLOUD:virttype#"
}
}
}
)

View File

@@ -0,0 +1,178 @@
#
# IBM(c) 2013 EPL license http://www.eclipse.org/legal/epl-v10.html
#
# Chef environment template for an xCAT keystone+swift "all-in-one"
# OpenStack (Grizzly) deployment. Tokens of the form
# #TABLE:clouds:name=$CLOUD:<column># are placeholders filled in from the
# matching row of the xCAT `clouds` table, and $CLOUD is replaced with the
# cloud name, before this file is used.
#
# NOTE: change the swift "proxy-cidr" and "object-cidr" values below to
# match your actual storage network before deploying with this template.
#
name "$CLOUD"
description "Grizzly keystone+swift allinone environment file."
override_attributes(
# MySQL server credentials; remote root login is allowed from any host ("%").
"mysql" => {
"server_root_password" => "cluster",
"server_debian_password" => "cluster",
"server_repl_password" => "cluster",
"allow_remote_root" => true,
"root_network_acl" => "%"
},
# Swift object storage, authenticated through keystone; proxy and all
# storage services bind to the single all-in-one host.
"swift" => {
"authmode" => "keystone",
"authkey" => "swift",
"proxy_server_chef_role"=>"os-object-storage",
"network" => {
"proxy-bind-ip" => "#TABLE:clouds:name=$CLOUD:hostip#",
"proxy-cidr" => "11.0.0.0/8",
"account-bind-ip" => "#TABLE:clouds:name=$CLOUD:hostip#",
"container-bind-ip" => "#TABLE:clouds:name=$CLOUD:hostip#",
"object-bind-ip" => "#TABLE:clouds:name=$CLOUD:hostip#",
"object-cidr" => "11.0.0.0/8"
}
},
"openstack" => {
# developer_mode true: unlike the plain allinone template, no encrypted
# data bag key_path is configured here -- presumably secrets default to
# developer values; verify against the openstack-common cookbook.
"developer_mode" => true,
# All service databases live on the single all-in-one host.
"db"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#",
"compute"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"identity"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"image"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"network"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"volume"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"dashboard"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"metering"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
}
},
# Message queue binds to the management interface.
"mq"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
# Keystone identity service and its database credentials.
"identity"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#",
"db"=>{
"username"=>"keystone",
"password"=> "keystone"
}
},
# Every API endpoint is served from the all-in-one host IP.
"endpoints"=>{
"identity-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"identity-admin"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-ec2-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-ec2-admin"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-xvpvnc"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"compute-novnc"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"network-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"image-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"image-registry"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"volume-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
},
"metering-api"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#",
}
},
# Glance image service; the cirros test image is declared but not uploaded
# automatically (image_upload is false).
"image" => {
"api"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"registry"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"image_upload" => false,
"upload_images" => ["cirros"],
"upload_image" => {
"cirros" => "https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img"
},
"identity_service_chef_role" => "allinone-compute"
},
# Cinder block storage; rabbit host points at the all-in-one node.
"block-storage" => {
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"keystone_service_chef_role" => "allinone-compute"
},
"dashboard" => {
"keystone_service_chef_role" => "allinone-compute",
"use_ssl" => "false"
},
# Networking: VLAN tenant networks; data-interface bridge is named
# "br-<datainterface>" (the "br-" prefix is required by openvswitch).
"network" => {
"metadata"=>{
"nova_metadata_ip"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"api"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#"
},
"rabbit_server_chef_role" => "allinone-compute",
"l3"=>{
"external_network_bridge_interface"=>"#TABLE:clouds:name=$CLOUD:pubinterface#"
},
"openvswitch"=> {
"tenant_network_type"=>"vlan",
"network_vlan_ranges"=>"physnet1",
"bridge_mappings"=>"physnet1:br-#TABLE:clouds:name=$CLOUD:datainterface#"
}
},
# Nova compute; networking is delegated to quantum, VNC consoles bind to
# the public interface, libvirt virt_type comes from the clouds table.
"compute" => {
"identity_service_chef_role" => "allinone-compute",
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
},
"xvpvnc_proxy"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:pubinterface#"
},
"novnc_proxy"=>{
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:pubinterface#"
},
"network" => {
"service_type" => "quantum"
},
"config" => {
# NOTE(review): ram_allocation_ratio 5.0 looks like a 5x RAM
# overcommit factor -- confirm against the nova cookbook.
"ram_allocation_ratio" => 5.0
},
"libvirt" => {
"bind_interface"=>"#TABLE:clouds:name=$CLOUD:mgtinterface#",
"virt_type" => "#TABLE:clouds:name=$CLOUD:virttype#"
}
}
}
)

View File

@@ -105,6 +105,11 @@ override_attributes(
"cirros" => "https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img"
},
},
"block-storage" => {
"rabbit"=>{
"host"=>"#TABLE:clouds:name=$CLOUD:hostip#"
}
},
"dashboard" => {
"use_ssl" => "false"
},

Some files were not shown because too many files have changed in this diff Show More