2010-02-08 14:29:47 +00:00
#IBM(c) 2007 EPL license http://www.eclipse.org/legal/epl-v10.html
2009-09-02 17:20:28 +00:00
#TODO:
2009-09-09 20:08:51 +00:00
#MEMLEAK fix
# see NodeRange.pm for notes about how to produce a memory leak
# xCAT as it stands at this moment shouldn't leak anymore due to what is
# described there, but that only hides from the real problem and the leak will
# likely crop up if future architecture changes happen
# in summary, a created Table object without benefit of db worker thread
# to abstract its existence will consume a few kilobytes of memory
# that never gets reused
2009-09-02 17:20:28 +00:00
# just enough notes to remind me of the design that I think would allow for
# -cache to persist so long as '_build_cache' calls concurrently stack (for NodeRange interpretation mainly) (done)
# -Allow plugins to define a staleness threshold for getNodesAttribs freshness (complicated enough to postpone...)
# so that actions requested by disparate managed nodes may aggregate in SQL calls
2012-05-12 11:54:49 +00:00
# cache lifetime is no longer determined strictly by function duration
# now it can live up to 5 seconds. However, most calls will ignore the cache unless using a special option.
# Hmm, potential issue, getNodesAttribs might return up to 5 second old data even if caller expects brand new data
2009-09-02 17:20:28 +00:00
# if called again, decrement again and clear cache
# for getNodesAttribs, we can put a parameter to request allowable staleneess
# if the cachestamp is too old, build_cache is called
# in this mode, 'use_cache' is temporarily set to 1, regardless of
# potential other consumers (notably, NodeRange)
2008-07-30 12:55:42 +00:00
#perl errors/and warnings are not currently wrapped.
# This probably will be cleaned
2007-10-26 22:44:33 +00:00
#up
#Some known weird behaviors
#creating new sqlite db files when only requested to read a non-existent table, easy to fix,
#class xcattable
2009-10-13 19:37:41 +00:00
#FYI on emulated AutoCommit:
#SQLite specific behavior has Table layer implementing AutoCommit. There
#is a significant limitation, 'rollback' may not roll all the way back
#if an intermediate transaction occurred on the same table
#TODO: short term, have tabutils implement its own rollback (the only consumer)
#TODO: longer term, either figure out a way to properly implement it or
# document it as a limitation for SQLite configurations
2007-10-26 22:44:33 +00:00
package xCAT::Table ;
2009-08-13 14:32:22 +00:00
use xCAT::MsgUtils ;
2008-01-14 22:19:17 +00:00
use Sys::Syslog ;
2009-08-04 21:10:32 +00:00
use Storable qw/freeze thaw/ ;
use IO::Socket ;
2011-04-11 13:49:13 +00:00
#use Data::Dumper;
2010-01-22 20:08:33 +00:00
use POSIX qw/WNOHANG/ ;
2010-08-06 12:54:41 +00:00
use Time::HiRes qw ( sleep ) ;
2010-08-24 20:46:22 +00:00
use Safe ;
my $ evalcpt = new Safe ;
2007-12-11 19:15:28 +00:00
BEGIN
{
2008-01-14 16:58:23 +00:00
$ ::XCATROOT = $ ENV { 'XCATROOT' } ? $ ENV { 'XCATROOT' } : - d '/opt/xcat' ? '/opt/xcat' : '/usr' ;
2007-12-11 19:15:28 +00:00
}
2009-08-03 13:07:29 +00:00
# if AIX - make sure we include perl 5.8.2 in INC path.
# Needed to find perl dependencies shipped in deps tarball.
if ( $^O =~ /^aix/i ) {
2012-08-08 17:27:50 +00:00
unshift ( @ INC , qw( /usr/opt/perl5/lib/5.8.2/aix-thread-multi /usr/opt/perl5/lib/5.8.2 /usr/opt/perl5/lib/site_perl/5.8.2/aix-thread-multi /usr/opt/perl5/lib/site_perl/5.8.2 ) ) ;
2009-08-03 13:07:29 +00:00
}
2007-12-11 19:15:28 +00:00
use lib "$::XCATROOT/lib/perl" ;
2008-06-30 13:51:44 +00:00
my $ cachethreshold = 16 ; #How many nodes in 'getNodesAttribs' before switching to full DB retrieval
2012-03-25 15:15:58 +00:00
#TODO: dynamic tracking/adjustment, the point where cache is cost effective differs based on overall db size
2007-10-26 22:44:33 +00:00
use DBI ;
2010-01-19 19:34:46 +00:00
$ DBI:: dbi_debug = 9 ; # increase the debug output
2007-10-26 22:44:33 +00:00
2008-09-07 21:08:13 +00:00
use strict ;
2007-11-13 21:38:32 +00:00
use Scalar::Util qw/weaken/ ;
2008-04-05 14:53:35 +00:00
require xCAT::Schema ;
require xCAT::NodeRange ;
2007-10-26 22:44:33 +00:00
use Text::Balanced qw( extract_bracketed ) ;
2008-04-05 14:53:35 +00:00
require xCAT::NotifHandler ;
2007-10-26 22:44:33 +00:00
2009-06-04 19:12:04 +00:00
my $ dbworkerpid ; #The process id of the database worker
my $ dbworkersocket ;
2010-07-08 20:49:12 +00:00
my $ dbsockpath = "/tmp/xcat/dbworker.sock." . $$ ;
2009-08-04 18:38:08 +00:00
my $ exitdbthread ;
2009-09-03 18:00:47 +00:00
my $ dbobjsforhandle ;
2010-02-17 22:19:39 +00:00
my $ intendedpid ;
2009-08-04 18:38:08 +00:00
2009-08-04 21:10:32 +00:00
# Forward a Table method invocation to the db worker process.
# Arguments: $self (Table object), the method name, then that method's
# arguments.  Marshals enough of $self (table name, autocommit mode) for the
# worker to locate/recreate the equivalent Table object on its side.
# Returns whatever the remote invocation returned (context-sensitive).
sub dbc_call {
    my ($self, $function, @args) = @_;
    return dbc_submit(
        {
            function   => $function,
            tablename  => $self->{tabname},
            autocommit => $self->{autocommit},
            args       => \@args,
        }
    );
}
2009-08-04 18:38:08 +00:00
# Submit a frozen request hash to the db worker over its unix socket and
# return the thawed response.  Wire format: Storable freeze() payload followed
# by a sentinel line; the worker replies with a freeze()d array of return
# values.  Dies (re-raising the worker-side error) if the reply carries the
# error marker.  Honors the caller's list/scalar context.
sub dbc_submit {
    my $request = shift;
    # record caller context so the worker can evaluate in the same context
    $request->{'wantarray'} = wantarray();
    my $data = freeze($request);
    $data .= "\nENDOFFREEZEQFVyo4Cj6Q0v\n";    # request framing sentinel
    my $clisock;
    # Retry connecting for up to ~30 seconds (300 * 0.1s); the worker may be
    # briefly unavailable (e.g. during startup or under heavy load).
    my $tries = 300;
    while ($tries and !($clisock = IO::Socket::UNIX->new(Peer => $dbsockpath, Type => SOCK_STREAM, Timeout => 120))) {
        #print "waiting for clisock to be available\n";
        sleep 0.1;    # Time::HiRes sleep, fractional seconds
        $tries--;
    }
    unless ($clisock) {
        # Connection never succeeded; log a stack trace.  NOTE(review): the
        # code falls through and prints to an undefined handle below -- the
        # subsequent print/read will warn/fail rather than die cleanly.
        use Carp qw/cluck/;
        cluck();
    }
    print $clisock $data;
    $data = "";
    my $lastline = "";
    # Accumulate the entire reply until EOF (worker closes after responding).
    while (read($clisock, $lastline, 32768)) { #$lastline ne "ENDOFFREEZEQFVyo4Cj6Q0j\n" and $lastline ne "*XCATBUGDETECTED*76e9b54341\n") { #index($lastline,"ENDOFFREEZEQFVyo4Cj6Q0j") < 0) {
        # $lastline = <$clisock>;
        $data .= $lastline;
    }
    close($clisock);
    # NOTE(review): the read() call that terminates the loop returns 0 at EOF
    # and empties $lastline, so this match may only succeed when the marker
    # happens to end a 32KB chunk; error replies may otherwise surface as a
    # thaw() failure below -- confirm intended behavior.
    if ($lastline =~ m/\*XCATBUGDETECTED\*76e9b54341\n\z/) { #if it was an error
        #in the midst of the operation, die like it used to die
        my $err;
        $data =~ /\*XCATBUGDETECTED\*:(.*):\*XCATBUGDETECTED\*/s;
        $err = $1;
        die $err;
    }
    my @returndata = @{ thaw($data) };
    if (wantarray) {
        return @returndata;
    } else {
        return $returndata[0];
    }
}
2009-08-04 21:10:32 +00:00
# Stop routing Table calls through the db worker in this process.
# Does not kill the worker; it merely zeroes the recorded pid so subsequent
# calls fall back to direct database access.
sub shut_dbworker {
    # Created because the monitoring framework shutdown code otherwise seems
    # to have a race condition; this may cost an extra db handle per service
    # node in shutdown scenarios, which is tolerable.
    $dbworkerpid = 0;
}
2009-08-04 18:38:08 +00:00
# Fork the database worker process.  In the parent, returns the worker's pid
# (a true value), which also flips this module into "proxy" mode for future
# Table calls.  The child never returns: it serves requests on a unix-domain
# socket until signaled, then exits.
sub init_dbworker {
    #create a db worker process
    #First, release all non-db-worker owned db handles (will recreate if we have to)
    foreach (values %{$::XCAT_DBHS})
    { #@{$drh->{ChildHandles}}) {
        if ($_) { $_->disconnect(); }
        # InactiveDestroy keeps the child's DESTROY from tearing down the
        # parent's server-side connection state after fork
        $_->{InactiveDestroy} = 1;
        undef $_;
    }
    $::XCAT_DBHS = {};
    $dbobjsforhandle = {}; #TODO: It's not said explicitly, but this means an
    #existing TABLE object is useless if going into db worker.  Table objects
    #must be recreated after the transition.  Only xcatd should have to
    #worry about it.  This may warrant being done better, making a Table
    #object meaningfully survive in much the same way it survives a DB handle
    #migration in handle_dbc_request
    $dbworkerpid = fork;
    xCAT::NodeRange::reset_db(); #do in both processes, to be sure
    unless (defined $dbworkerpid) {
        # NOTE(review): "spawining" typo is preserved in this runtime message
        die "Error spawining database worker";
    }
    unless ($dbworkerpid) {
        # --- child: the database worker itself ---
        $intendedpid = $$;    # only this pid may run the service loop below
        $SIG{CHLD} = sub { while (waitpid(-1, WNOHANG) > 0) { } }; #avoid zombies from notification framework
        #This process is the database worker, its job is to manage database queries to reduce required handles and to permit cross-process caching
        $0 = "xcatd: DB Access";    # friendly name in ps output
        use File::Path;
        mkpath('/tmp/xcat/');
        use IO::Socket;
        # On TERM/INT: ask the loop to exit, and force exit via ALRM if the
        # loop doesn't wind down within 10 seconds.
        $SIG{TERM} = $SIG{INT} = sub {
            $exitdbthread = 1;
            $SIG{ALRM} = sub { exit 0; };
            alarm(10);
        };
        unlink($dbsockpath);
        umask(0077);    # socket should be accessible to owner only
        $dbworkersocket = IO::Socket::UNIX->new(Local => $dbsockpath, Type => SOCK_STREAM, Listen => 8192);
        unless ($dbworkersocket) {
            die $!;
        }
        my $currcon;
        my $clientset = new IO::Select;
        $clientset->add($dbworkersocket);
        #setup signal in NotifHandler so that the cache can be updated
        xCAT::NotifHandler::setup($$, 0);
        # Main service loop: multiplex the listening socket plus all accepted
        # client connections with select().
        while (not $exitdbthread) {
            eval {
                my @ready_socks = $clientset->can_read;
                foreach $currcon (@ready_socks) {
                    if ($currcon == $dbworkersocket) { #We have a new connection to register
                        my $dbconn = $currcon->accept;
                        if ($dbconn) {
                            $clientset->add($dbconn);
                        }
                    } else {
                        eval {
                            handle_dbc_conn($currcon, $clientset);
                        };
                        if ($@) {    # request handler blew up: report the bug to the client and drop it
                            my $err = $@;
                            xCAT::MsgUtils->message("S", "xcatd: possible BUG encountered by xCAT DB worker " . $err);
                            if ($currcon) {
                                eval { #avoid hang by allowing client to die too
                                    print $currcon "*XCATBUGDETECTED*:$err:*XCATBUGDETECTED*\n";
                                    print $currcon "*XCATBUGDETECTED*76e9b54341\n";
                                    $clientset->remove($currcon);
                                    close($currcon);
                                };
                            }
                        }
                    }
                }
            };
            if ($@) { #this should never be reached, but leave it intact just in case
                my $err = $@;
                eval { xCAT::MsgUtils->message("S", "xcatd: possible BUG encountered by xCAT DB worker " . $err); };
            }
            if ($intendedpid != $$) { #avoid redundant fork
                eval { xCAT::MsgUtils->message("S", "Pid $$ shutting itself down because only pid $intendedpid is permitted to be in this area"); };
                exit(0);
            }
        }
        close($dbworkersocket);
        unlink($dbsockpath);
        exit 0;
    }
    # --- parent ---
    return $dbworkerpid;
}
# Service one ready client socket inside the db worker: read a complete
# framed request (lines up to the sentinel), dispatch it through
# handle_dbc_request in the context the caller asked for, send back the
# frozen result, then remove and close the connection.
sub handle_dbc_conn {
    my $client    = shift;    # connected unix-domain socket
    my $clientset = shift;    # IO::Select set the socket belongs to
    my $data;
    if ($data = <$client>) {
        my $lastline;
        # Accumulate lines until the request-framing sentinel arrives.
        # NOTE(review): if the client disconnects before sending the sentinel,
        # <$client> returns undef forever and this loop never terminates
        # (and the first 'ne' compares against an undef $lastline) -- confirm.
        while ($lastline ne "ENDOFFREEZEQFVyo4Cj6Q0v\n") { #$data !~ /ENDOFFREEZEQFVyo4Cj6Q0v/) {
            $lastline = <$client>;
            $data .= $lastline;
        }
        my $request = thaw($data);
        my $response;
        my @returndata;
        # Evaluate in the same context the remote caller used, so
        # context-sensitive returns behave identically.
        if ($request->{'wantarray'}) {
            @returndata = handle_dbc_request($request);
        } else {
            @returndata = (scalar(handle_dbc_request($request)));
        }
        $response = freeze(\@returndata);
        # $response .= "\nENDOFFREEZEQFVyo4Cj6Q0j\n";
        print $client $response;
        # one request per connection: close after replying
        $clientset->remove($client);
        close($client);
    } else { #Connection terminated, clean up
        $clientset->remove($client);
        close($client);
    }
}
my %opentables; #USED ONLY BY THE DB WORKER TO TRACK OPEN DATABASES (tablename -> autocommit flag -> Table object)

# Execute one proxied Table request inside the db worker.
# $request fields: function (method name), tablename, autocommit, args
# (arrayref of the method's arguments).  Before dispatching, dead DBI handles
# are detected via ping() and transparently replaced (clone()), migrating
# every Table object that referenced the dead handle.  Returns whatever the
# dispatched method returns; 1/0 for 'new'; undef on unrecoverable DB loss.
sub handle_dbc_request {
    my $request      = shift;
    my $functionname = $request->{function};
    my $tablename    = $request->{tablename};
    my @args         = @{$request->{args}};
    my $autocommit   = $request->{autocommit};
    my $dbindex;
    foreach $dbindex (keys %{$::XCAT_DBHS}) { #Go through the current open DB handles
        unless ($::XCAT_DBHS->{$dbindex}) { next; } #If we have a stale dbindex entry skip it (should no longer happen with additions to init_dbworker
        unless ($::XCAT_DBHS->{$dbindex} and $::XCAT_DBHS->{$dbindex}->ping) {
            #We have a database that we were unable to reach, migrate database
            #handles out from under table objects
            my @afflictedobjs = (); #Get the list of objects whose database handle needs to be replaced
            if (defined $dbobjsforhandle->{ $::XCAT_DBHS->{$dbindex} }) {
                @afflictedobjs = @{ $dbobjsforhandle->{ $::XCAT_DBHS->{$dbindex} } };
            } else {
                die "DB HANDLE TRACKING CODE HAS A BUG";
            }
            my $oldhandle = $::XCAT_DBHS->{$dbindex}; #store old handle off
            $::XCAT_DBHS->{$dbindex} = $::XCAT_DBHS->{$dbindex}->clone(); #replace broken db handle with nice, new, working one
            unless ($::XCAT_DBHS->{$dbindex}) { #this means the clone failed
                #most likely result is the DB is down
                #restore the old broken handle
                #so that future recovery attempts have a shot
                #a broken db handle we can recover, no db handle we cannot
                $::XCAT_DBHS->{$dbindex} = $oldhandle;
                return undef;
            }
            $dbobjsforhandle->{ $::XCAT_DBHS->{$dbindex} } = $dbobjsforhandle->{$oldhandle}; #Move the map of dependent objects to the new handle
            foreach (@afflictedobjs) { #migrate afflicted objects to the new DB handle
                # each entry is a reference to a Table object's scalar slot
                $$_->{dbh} = $::XCAT_DBHS->{$dbindex};
            }
            delete $dbobjsforhandle->{$oldhandle}; #remove the entry for the stale handle
            $oldhandle->disconnect(); #free resources associated with dead handle
        }
    }
    if ($functionname eq 'new') {
        unless ($opentables{$tablename}->{$autocommit}) {
            shift @args; #Strip repeat class stuff
            $opentables{$tablename}->{$autocommit} = xCAT::Table->new(@args);
        }
        if ($opentables{$tablename}->{$autocommit}) {
            # $autocommit ^ 1 is the sibling Table opened with the opposite
            # autocommit mode; cross-link the two so cache invalidation
            # propagates between them
            if ($opentables{$tablename}->{ $autocommit ^ 1 }) {
                $opentables{$tablename}->{$autocommit}->{cachepeer} = $opentables{$tablename}->{ $autocommit ^ 1 };
                $opentables{$tablename}->{ $autocommit ^ 1 }->{cachepeer} = $opentables{$tablename}->{$autocommit};
            }
            return 1;
        } else {
            return 0;
        }
    } else {
        unless (defined $opentables{$tablename}->{$autocommit}) {
            #We are servicing a Table object that used to be
            #non data-worker.  Create a new DB worker side Table like the one
            #that requests this
            $opentables{$tablename}->{$autocommit} = xCAT::Table->new($tablename, -create => 0, -autocommit => $autocommit);
            unless ($opentables{$tablename}->{$autocommit}) {
                return undef;
            }
            # same peer cross-link as in the 'new' path above
            if ($opentables{$tablename}->{ $autocommit ^ 1 }) {
                $opentables{$tablename}->{$autocommit}->{cachepeer} = $opentables{$tablename}->{ $autocommit ^ 1 };
                $opentables{$tablename}->{ $autocommit ^ 1 }->{cachepeer} = $opentables{$tablename}->{$autocommit};
            }
        }
    }
    # Dispatch to the whitelisted Table method; anything else is a bug.
    if ($functionname eq 'getAllAttribs') {
        return $opentables{$tablename}->{$autocommit}->getAllAttribs(@args);
    } elsif ($functionname eq 'getAttribs') {
        return $opentables{$tablename}->{$autocommit}->getAttribs(@args);
    } elsif ($functionname eq 'getTable') {
        return $opentables{$tablename}->{$autocommit}->getTable(@args);
    } elsif ($functionname eq 'getAllNodeAttribs') {
        return $opentables{$tablename}->{$autocommit}->getAllNodeAttribs(@args);
    } elsif ($functionname eq 'getAllEntries') {
        return $opentables{$tablename}->{$autocommit}->getAllEntries(@args);
    } elsif ($functionname eq 'writeAllEntries') {
        return $opentables{$tablename}->{$autocommit}->writeAllEntries(@args);
    } elsif ($functionname eq 'getAllAttribsWhere') {
        return $opentables{$tablename}->{$autocommit}->getAllAttribsWhere(@args);
    } elsif ($functionname eq 'writeAllAttribsWhere') {
        return $opentables{$tablename}->{$autocommit}->writeAllAttribsWhere(@args);
    } elsif ($functionname eq 'addAttribs') {
        return $opentables{$tablename}->{$autocommit}->addAttribs(@args);
    } elsif ($functionname eq 'setAttribs') {
        return $opentables{$tablename}->{$autocommit}->setAttribs(@args);
    } elsif ($functionname eq 'setAttribsWhere') {
        return $opentables{$tablename}->{$autocommit}->setAttribsWhere(@args);
    } elsif ($functionname eq 'delEntries') {
        return $opentables{$tablename}->{$autocommit}->delEntries(@args);
    } elsif ($functionname eq 'commit') {
        return $opentables{$tablename}->{$autocommit}->commit(@args);
    } elsif ($functionname eq 'rollback') {
        return $opentables{$tablename}->{$autocommit}->rollback(@args);
    } elsif ($functionname eq 'getNodesAttribs') {
        return $opentables{$tablename}->{$autocommit}->getNodesAttribs(@args);
    } elsif ($functionname eq 'setNodesAttribs') {
        return $opentables{$tablename}->{$autocommit}->setNodesAttribs(@args);
    } elsif ($functionname eq 'getNodeAttribs') {
        return $opentables{$tablename}->{$autocommit}->getNodeAttribs(@args);
    } elsif ($functionname eq '_set_use_cache') {
        return $opentables{$tablename}->{$autocommit}->_set_use_cache(@args);
    } elsif ($functionname eq '_build_cache') {
        return $opentables{$tablename}->{$autocommit}->_build_cache(@args);
    } else {
        die "undefined function $functionname";
    }
}
2009-08-09 15:48:38 +00:00
# Toggle this Table object's use of its in-memory cache.
# Proxies to the db worker when one is active.  Enabling is refused while
# the cache itself is absent/broken; disabling is always honored.
sub _set_use_cache {
    my $self = shift;
    if ($dbworkerpid) {
        return dbc_call($self, '_set_use_cache', @_);
    }
    my $enable = shift;
    # do not allow cache to be enabled while the cache is broken
    return if $enable && !$self->{_tablecache};
    $self->{_use_cache} = $enable;
}
2007-10-26 22:44:33 +00:00
#--------------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head1 xCAT:: Table
2007-10-26 22:44:33 +00:00
xCAT:: Table - Perl module for xCAT configuration access
= head2 SYNOPSIS
use xCAT::Table ;
my $ table = xCAT::Table - > new ( "tablename" ) ;
my $ hashref = $ table - > getNodeAttribs ( "nodename" , "columname1" , "columname2" ) ;
printf $ hashref - > { columname1 } ;
= head2 DESCRIPTION
This module provides convenience methods that abstract the backend specific configuration to a common API .
2008-08-26 13:43:31 +00:00
Currently implements the preferred SQLite backend , as well as a CSV backend , postgresql and MySQL , using their respective perl DBD modules .
2007-10-26 22:44:33 +00:00
NOTES
The CSV backend is really slow at scale . Room for optimization is likely , but in general DBD:: CSV is slow , relative to xCAT 1.2 . x .
The SQLite backend , on the other hand , is significantly faster on reads than the xCAT 1.2 . x way , so it is recommended .
BUGS
This module is not thread - safe , due to underlying DBD thread issues . Specifically in testing , SQLite DBD leaks scalars if a thread
where a Table object exists spawns a child and that child exits . The recommended workaround for now is to spawn a thread to contain
all Table objects if you intend to spawn threads from your main thread . As long as no thread in which the new method is called spawns
child threads , it seems to work fine .
AUTHOR
Jarrod Johnson <jbjohnso@us.ibm.com>
xCAT:: Table is released under an IBM license ... .
= cut
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head2 Subroutines
2007-10-26 22:44:33 +00:00
= cut
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head3 buildcreatestmt
2007-10-26 22:44:33 +00:00
Description: Build create table statement ( see new )
Arguments:
Table name
Table schema ( hash of column names )
Returns:
2008-02-21 21:10:35 +00:00
Table creation SQL
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
my $ str =
buildcreatestmt ( $ self - > { tabname } ,
$ xCAT:: Schema:: tabspec { $ self - > { tabname } } ) ;
= cut
#--------------------------------------------------------------------------------
# Build the CREATE TABLE statement for a table, honoring backend-specific
# column typing (DB2 vs. everything else), key length requirements, NOT NULL
# rules, and optional mysql ENGINE / DB2 compress+tablespace clauses.
# Arguments: table name, schema hashref (cols/keys/required/types/...),
# database config string (e.g. "SQLite:/etc/xcat", "DB2:...").
# Returns: the SQL string.
sub buildcreatestmt
{
    my $tabn    = shift;
    my $descr   = shift;
    my $xcatcfg = shift;
    my $retv  = "CREATE TABLE $tabn (\n ";
    my $col;
    my $types = $descr->{types};
    my $delimitedcol;
    foreach $col (@{$descr->{cols}})
    {
        my $datatype;
        # DB2 has its own typing rules (no TEXT, identity columns, etc.)
        if ($xcatcfg =~ /^DB2:/) {
            $datatype = get_datatype_string_db2($col, $types, $tabn, $descr);
        } else {
            $datatype = get_datatype_string($col, $xcatcfg, $types);
        }
        if ($datatype eq "TEXT") {
            if (isAKey(\@{$descr->{keys}}, $col)) { # keys need defined length
                $datatype = "VARCHAR(128) ";
            }
        }
        # delimit the columns of the table (quote reserved-word column names)
        $delimitedcol = &delimitcol($col);
        $retv .= $delimitedcol . " $datatype"; # mysql change
        if (grep /^$col$/, @{$descr->{required}})
        {
            # will have already put in NOT NULL, if DB2 and a key
            if (!($xcatcfg =~ /^DB2:/)) { # not a db2 key
                $retv .= " NOT NULL";
            } else { # is DB2
                if (!(isAKey(\@{$descr->{keys}}, $col))) { # not a key
                    $retv .= " NOT NULL";
                }
            }
        }
        $retv .= ",\n ";
    }
    # If a column already declared PRIMARY KEY inline (e.g. SQLite
    # AUTOINCREMENT), just terminate the column list; otherwise append a
    # composite PRIMARY KEY clause built from the schema's key columns.
    if ($retv =~ /PRIMARY KEY/) {
        $retv =~ s/,\n $/\n)/;
    } else {
        $retv .= "PRIMARY KEY (";
        foreach (@{$descr->{keys}})
        {
            $delimitedcol = &delimitcol($_);
            $retv .= $delimitedcol . ",";
        }
        $retv =~ s/,$/)\n)/;
    }
    # NOTE(review): this second substitution appears redundant after the
    # branch above; it only fires if a trailing comma survived -- confirm
    $retv =~ s/,$/)\n)/;
    # allow engine change for mysql
    if ($descr->{engine}) {
        if ($xcatcfg =~ /^mysql:/) { #for mysql
            $retv .= " ENGINE=$descr->{engine} ";
        }
    }
    # allow compression for DB2
    if ($descr->{compress}) {
        if ($xcatcfg =~ /^DB2:/) { #for DB2
            $retv .= " compress $descr->{compress} ";
        }
    }
    # allow tablespace change for DB2
    if ($descr->{tablespace}) {
        if ($xcatcfg =~ /^DB2:/) { #for DB2
            $retv .= " in $descr->{tablespace} ";
        }
    }
    #print "retv=$retv\n";
    return $retv;
}
2010-02-05 15:17:29 +00:00
#--------------------------------------------------------------------------
= head3
Description: get_datatype_string ( for mysql , sqlite , postgresql )
Arguments:
Table column , database , types
Returns:
the datatype for the column being defined
Globals:
Error:
Example:
my $ datatype = get_datatype_string ( $ col , $ xcatcfg , $ types ) ;
= cut
#--------------------------------------------------------------------------------
2009-08-20 03:26:32 +00:00
# Resolve the SQL column type for one column on non-DB2 backends
# (SQLite, Pg, mysql, CSV).
# Arguments: column name, db config string, hashref of declared column types
# (may be undef).  Returns the type string; "TEXT" when no type is declared;
# undef for an AUTO_INCREMENT column on an unrecognized backend.
sub get_datatype_string {
    my ($column, $cfg, $typemap) = @_;
    # no declared type (or falsy entry) => generic TEXT
    my $declared = ($typemap && $typemap->{$column}) ? $typemap->{$column} : undef;
    return "TEXT" unless $declared;
    if ($declared =~ /INTEGER AUTO_INCREMENT/) {
        # each backend spells auto-increment differently
        return "INTEGER PRIMARY KEY AUTOINCREMENT" if $cfg =~ /^SQLite:/;
        return "SERIAL"                            if $cfg =~ /^Pg:/;
        return "INTEGER AUTO_INCREMENT"            if $cfg =~ /^mysql:/;
        return undef;    # unknown backend: original left the type unset
    }
    return $declared;    # declared type used verbatim
}
2010-02-05 15:17:29 +00:00
#--------------------------------------------------------------------------
= head3
Description: get_datatype_string_db2 ( for DB2 )
Arguments:
2010-02-08 14:29:47 +00:00
Table column , database , types , tablename , table schema
2010-02-05 15:17:29 +00:00
Returns:
the datatype for the column being defined
Globals:
Error:
Example:
2010-02-08 14:29:47 +00:00
my $ datatype = get_datatype_string_db2 ( $ col , $ types , $ tablename , $ descr ) ;
2010-02-05 15:17:29 +00:00
= cut
#--------------------------------------------------------------------------------
# Resolve the SQL column type for one column on the DB2 backend.
# Arguments: column name, hashref of declared types (may be undef), table
# name (unused here, kept for interface compatibility), schema hashref
# (its {keys} list decides key columns).  Key columns get NOT NULL and a
# bounded length; TEXT is mapped to VARCHAR since DB2 lacks it; the
# 'disable' and 'rawdata' columns are always forced to fixed sizes.
sub get_datatype_string_db2 {
    my ($col, $types, $tablename, $descr) = @_;
    my $dtype = "varchar(512)";    # default for most attributes
    my $declared = ($types && $types->{$col}) ? $types->{$col} : undef;
    if (defined $declared) {
        if ($declared =~ /INTEGER AUTO_INCREMENT/) {
            # DB2 spelling of an auto-increment column
            $dtype = "INTEGER GENERATED ALWAYS AS IDENTITY";
        }
        elsif (isAKey(\@{ $descr->{keys} }, $col)) {
            # key columns must be NOT NULL (trailing space preserved)
            $dtype = $declared . " NOT NULL ";
        }
        else {
            # TEXT does not exist in DB2; substitute a bounded varchar
            $dtype = ($declared eq "TEXT") ? "VARCHAR(512)" : $declared;
        }
    }
    elsif (isAKey(\@{ $descr->{keys} }, $col)) {
        # untyped key column: bounded length, NOT NULL
        $dtype = "VARCHAR(128) NOT NULL ";
    }
    # unconditional per-column overrides, applied last
    $dtype = "varchar(8)"    if $col eq "disable";
    $dtype = "varchar(4098)" if $col eq "rawdata";    # from eventlog table
    return $dtype;
}
#--------------------------------------------------------------------------
= head3
Description: get_xcatcfg
Arguments:
none
Returns:
the database name from /etc/xc at / cfgloc or sqlite
Globals:
Error:
Example:
my $ xcatcfg = get_xcatcfg ( ) ;
= cut
#--------------------------------------------------------------------------------
2009-08-20 03:26:32 +00:00
# Determine the database configuration string, in priority order:
# 1. $ENV{XCATCFG}; 2. first line of /etc/xcat/cfgloc (cached back into the
# environment); 3. SQLite defaults under /opt/xcat/cfg or /etc/xcat.
# A value lacking a "driver:" prefix is assumed to be SQLite.
# Dies if no configuration can be located.
sub get_xcatcfg
{
    my $cfg = defined $ENV{'XCATCFG'} ? $ENV{'XCATCFG'} : '';
    unless ($cfg) {
        if (-r "/etc/xcat/cfgloc") {
            # read just the first line of cfgloc
            my $fh;
            open($fh, "<", "/etc/xcat/cfgloc");
            $cfg = <$fh>;
            close($fh);
            chomp($cfg);
            # Store it in env to avoid many file reads
            $ENV{'XCATCFG'} = $cfg;
        }
    }
    if ($cfg =~ /^$/)
    {
        # fall back to conventional SQLite locations
        if (-d "/opt/xcat/cfg") {
            $cfg = "SQLite:/opt/xcat/cfg";
        }
        elsif (-d "/etc/xcat") {
            $cfg = "SQLite:/etc/xcat";
        }
    }
    ($cfg =~ /^$/) && die "Can't locate xCAT configuration";
    # bare path => SQLite backend
    $cfg = "SQLite:" . $cfg unless $cfg =~ /:/;
    return $cfg;
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head3 new
2007-10-26 22:44:33 +00:00
Description: Constructor: Connects to or Creates Database Table
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Arguments: Table name
0 = Connect to table
1 = Create table
Returns:
Hash: Database Handle , Statement Handle , nodelist
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
2008-02-21 21:10:35 +00:00
$ nodelisttab = xCAT::Table - > new ( "nodelist" ) ;
2007-10-26 22:44:33 +00:00
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub new
{
#Constructor takes table name as argument
#Also takes a true/false value, or assumes 0. If something true is passed, create table
#is requested
2009-08-04 18:38:08 +00:00
my @ args = @ _ ;
2007-10-26 22:44:33 +00:00
my $ self = { } ;
my $ proto = shift ;
$ self - > { tabname } = shift ;
unless ( defined ( $ xCAT:: Schema:: tabspec { $ self - > { tabname } } ) ) { return undef ; }
$ self - > { schema } = $ xCAT:: Schema:: tabspec { $ self - > { tabname } } ;
$ self - > { colnames } = \ @ { $ self - > { schema } - > { cols } } ;
2008-02-21 21:10:35 +00:00
$ self - > { descriptions } = \ % { $ self - > { schema } - > { descriptions } } ;
2007-10-26 22:44:33 +00:00
my % otherargs = @ _ ;
2009-07-21 03:27:23 +00:00
my $ create = 1 ;
if ( exists ( $ otherargs { '-create' } ) && ( $ otherargs { '-create' } == 0 ) ) { $ create = 0 ; }
2008-01-23 15:52:27 +00:00
$ self - > { autocommit } = $ otherargs { '-autocommit' } ;
unless ( defined ( $ self - > { autocommit } ) )
2007-10-26 22:44:33 +00:00
{
2008-01-23 15:52:27 +00:00
$ self - > { autocommit } = 1 ;
2007-10-26 22:44:33 +00:00
}
2011-01-05 21:33:03 +00:00
$ self - > { realautocommit } = $ self - > { autocommit } ; #Assume requester autocommit behavior maps directly to DBI layer autocommit
2007-10-26 22:44:33 +00:00
my $ class = ref ( $ proto ) || $ proto ;
2009-08-04 18:38:08 +00:00
if ( $ dbworkerpid ) {
my $ request = {
function = > "new" ,
tablename = > $ self - > { tabname } ,
autocommit = > $ self - > { autocommit } ,
args = > \ @ args ,
} ;
2009-08-04 21:10:32 +00:00
unless ( dbc_submit ( $ request ) ) {
return undef ;
}
2009-08-04 18:38:08 +00:00
} else { #direct db access mode
2012-05-10 19:59:49 +00:00
if ( $ opentables { $ self - > { tabname } } - > { $ self - > { autocommit } } ) { #if we are inside the db worker and asked to create a new table that is already open, just return a reference to that table
#generally speaking, this should cause a lot of nodelists to be shared
return $ opentables { $ self - > { tabname } } - > { $ self - > { autocommit } } ;
}
2009-08-04 18:38:08 +00:00
$ self - > { dbuser } = "" ;
$ self - > { dbpass } = "" ;
2009-08-20 03:26:32 +00:00
my $ xcatcfg = get_xcatcfg ( ) ;
2010-01-19 19:34:46 +00:00
my $ xcatdb2schema ;
if ( $ xcatcfg =~ /^DB2:/ ) { # for DB2 , get schema name
my @ parts = split ( '\|' , $ xcatcfg ) ;
$ xcatdb2schema = $ parts [ 1 ] ;
$ xcatdb2schema =~ tr /a-z/ A - Z / ; # convert to upper
}
2009-08-20 03:26:32 +00:00
2009-08-04 18:38:08 +00:00
if ( $ xcatcfg =~ /^SQLite:/ )
{
$ self - > { backend_type } = 'sqlite' ;
2009-10-26 09:53:35 +00:00
$ self - > { realautocommit } = 1 ; #Regardless of autocommit semantics, only electively do autocommit due to SQLite locking difficulties
2011-01-05 21:33:03 +00:00
#for SQLite, we cannot open the same db twice without deadlock risk, so we cannot have both autocommit on and off via
#different handles, so we pick one
#previously, Table.pm tried to imitate autocommit, but evidently was problematic, so now SQLite is just almost always
#autocommit, turned off very selectively
#so realautocommit is a hint to say that the handle needs to be set back to autocommit as soon as possible
2009-08-04 18:38:08 +00:00
my @ path = split ( ':' , $ xcatcfg , 2 ) ;
unless ( - e $ path [ 1 ] . "/" . $ self - > { tabname } . ".sqlite" || $ create )
2007-10-26 22:44:33 +00:00
{
2009-08-04 18:38:08 +00:00
return undef ;
2007-10-26 22:44:33 +00:00
}
2009-08-04 18:38:08 +00:00
$ self - > { connstring } =
"dbi:" . $ xcatcfg . "/" . $ self - > { tabname } . ".sqlite" ;
2007-10-26 22:44:33 +00:00
}
2009-08-04 18:38:08 +00:00
elsif ( $ xcatcfg =~ /^CSV:/ )
2007-10-26 22:44:33 +00:00
{
2009-08-04 18:38:08 +00:00
$ self - > { backend_type } = 'csv' ;
$ xcatcfg =~ m/^.*?:(.*)$/ ;
my $ path = $ 1 ;
$ self - > { connstring } = "dbi:CSV:f_dir=" . $ path ;
2007-10-26 22:44:33 +00:00
}
2009-08-04 18:38:08 +00:00
else #Generic DBI
{
( $ self - > { connstring } , $ self - > { dbuser } , $ self - > { dbpass } ) = split ( /\|/ , $ xcatcfg ) ;
$ self - > { connstring } =~ s/^dbi:// ;
$ self - > { connstring } =~ s/^/dbi:/ ;
#return undef;
}
2010-02-17 14:38:03 +00:00
if ( $ xcatcfg =~ /^DB2:/ ) { # for DB2 ,export the INSTANCE name
$ ENV { 'DB2INSTANCE' } = $ self - > { dbuser } ;
}
2009-08-04 18:38:08 +00:00
my $ oldumask = umask 0077 ;
2009-10-13 19:37:41 +00:00
unless ( $ ::XCAT_DBHS - > { $ self - > { connstring } , $ self - > { dbuser } , $ self - > { dbpass } , $ self - > { realautocommit } } ) { #= $self->{tabname};
$ ::XCAT_DBHS - > { $ self - > { connstring } , $ self - > { dbuser } , $ self - > { dbpass } , $ self - > { realautocommit } } =
DBI - > connect ( $ self - > { connstring } , $ self - > { dbuser } , $ self - > { dbpass } , { AutoCommit = > $ self - > { realautocommit } } ) ;
2009-08-04 18:38:08 +00:00
}
umask $ oldumask ;
2009-10-13 19:37:41 +00:00
$ self - > { dbh } = $ ::XCAT_DBHS - > { $ self - > { connstring } , $ self - > { dbuser } , $ self - > { dbpass } , $ self - > { realautocommit } } ;
2009-09-03 16:33:55 +00:00
#Store the Table object reference as afflicted by changes to the DBH
#This for now is ok, as either we aren't in DB worker mode, in which case this structure would be short lived...
#or we are in db worker mode, in which case Table objects live indefinitely
#TODO: be able to reap these objects sanely, just in case
2009-10-26 09:53:35 +00:00
push @ { $ dbobjsforhandle - > { $ ::XCAT_DBHS - > { $ self - > { connstring } , $ self - > { dbuser } , $ self - > { dbpass } , $ self - > { realautocommit } } } } , \ $ self ;
2009-08-04 18:38:08 +00:00
#DBI->connect($self->{connstring}, $self->{dbuser}, $self->{dbpass}, {AutoCommit => $autocommit});
if ( $ xcatcfg =~ /^SQLite:/ )
{
my $ dbexistq =
"SELECT name from sqlite_master WHERE type='table' and name = ?" ;
my $ sth = $ self - > { dbh } - > prepare ( $ dbexistq ) ;
$ sth - > execute ( $ self - > { tabname } ) ;
my $ result = $ sth - > fetchrow ( ) ;
$ sth - > finish ;
unless ( defined $ result )
{
if ( $ create )
{
my $ str =
buildcreatestmt ( $ self - > { tabname } ,
$ xCAT:: Schema:: tabspec { $ self - > { tabname } } ,
$ xcatcfg ) ;
$ self - > { dbh } - > do ( $ str ) ;
2009-10-29 16:24:04 +00:00
if ( ! $ self - > { dbh } - > { AutoCommit } ) {
$ self - > { dbh } - > commit ;
}
2009-08-04 18:38:08 +00:00
}
else { return undef ; }
}
}
elsif ( $ xcatcfg =~ /^CSV:/ )
2007-10-26 22:44:33 +00:00
{
2009-08-04 18:38:08 +00:00
$ self - > { dbh } - > { 'csv_tables' } - > { $ self - > { tabname } } =
{ 'file' = > $ self - > { tabname } . ".csv" } ;
$ xcatcfg =~ m/^.*?:(.*)$/ ;
my $ path = $ 1 ;
if ( ! - e $ path . "/" . $ self - > { tabname } . ".csv" )
2007-10-26 22:44:33 +00:00
{
2009-08-04 18:38:08 +00:00
unless ( $ create )
{
return undef ;
}
2007-10-26 22:44:33 +00:00
my $ str =
buildcreatestmt ( $ self - > { tabname } ,
2009-01-27 17:53:44 +00:00
$ xCAT:: Schema:: tabspec { $ self - > { tabname } } ,
2009-08-04 18:38:08 +00:00
$ xcatcfg ) ;
2007-10-26 22:44:33 +00:00
$ self - > { dbh } - > do ( $ str ) ;
}
2009-08-04 18:38:08 +00:00
} else { #generic DBI
2009-09-27 03:02:46 +00:00
if ( ! $ self - > { dbh } )
{
2010-01-21 20:39:55 +00:00
xCAT::MsgUtils - > message ( "S" , "Could not connect to the database. Database handle not defined." ) ;
2009-09-27 03:02:46 +00:00
return undef ;
}
2010-01-19 19:34:46 +00:00
my $ tbexistq ;
2010-02-22 19:09:41 +00:00
my $ dbtablename = $ self - > { tabname } ;
2010-01-19 19:34:46 +00:00
my $ found = 0 ;
if ( $ xcatcfg =~ /^DB2:/ ) { # for DB2
2010-02-22 19:09:41 +00:00
$ dbtablename =~ tr /a-z/ A - Z / ; # convert to upper
$ tbexistq = $ self - > { dbh } - > table_info ( undef , $ xcatdb2schema , $ dbtablename , 'TABLE' ) ;
2010-01-19 19:34:46 +00:00
} else {
$ tbexistq = $ self - > { dbh } - > table_info ( '' , '' , $ self - > { tabname } , 'TABLE' ) ;
}
2009-08-04 18:38:08 +00:00
while ( my $ data = $ tbexistq - > fetchrow_hashref ) {
2010-02-22 19:09:41 +00:00
if ( $ data - > { 'TABLE_NAME' } =~ /^\"?$dbtablename\"?\z/ ) {
if ( $ xcatcfg =~ /^DB2:/ ) { # for DB2
if ( $ data - > { 'TABLE_SCHEM' } =~ /^\"?$xcatdb2schema\"?\z/ ) {
# must check schema also with db2
$ found = 1 ;
last ;
}
} else { # not db2
$ found = 1 ;
last ;
}
2007-10-26 22:44:33 +00:00
}
2010-01-19 19:34:46 +00:00
}
unless ( $ found ) {
unless ( $ create )
{
return undef ;
}
my $ str =
2009-08-04 18:38:08 +00:00
buildcreatestmt ( $ self - > { tabname } ,
$ xCAT:: Schema:: tabspec { $ self - > { tabname } } ,
$ xcatcfg ) ;
2011-02-06 11:35:12 +00:00
$ self - > { dbh } - > do ( $ str ) ;
if ( ! $ self - > { dbh } - > { AutoCommit } ) {
$ self - > { dbh } - > commit ; # commit the create
}
2010-01-19 19:34:46 +00:00
}
} # end Generic DBI
2008-01-20 02:15:55 +00:00
2008-02-21 21:10:35 +00:00
2009-08-04 21:10:32 +00:00
} #END DB ACCESS SPECIFIC SECTION
2007-10-26 22:44:33 +00:00
if ( $ self - > { tabname } eq 'nodelist' )
{
2007-11-13 21:38:32 +00:00
weaken ( $ self - > { nodelist } = $ self ) ;
2007-10-26 22:44:33 +00:00
}
else
{
$ self - > { nodelist } = xCAT::Table - > new ( 'nodelist' , - create = > 1 ) ;
}
bless ( $ self , $ class ) ;
return $ self ;
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head3 updateschema
2007-10-26 22:44:33 +00:00
2010-08-31 13:57:24 +00:00
Description: Alters table info in the database based on Schema changes
Handles adding attributes
Handles removing attributes but does not really remove them
from the database .
Handles adding keys
2007-10-26 22:44:33 +00:00
Arguments: Hash containing Database and Table Handle and schema
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Returns: None
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2010-09-01 14:09:38 +00:00
Example: my $ nodelisttab = xCAT::Table - > new ( 'nodelist' ) ;
$ nodelisttab - > updateschema ( ) ;
$ nodelisttab - > close ( ) ;
2007-10-26 22:44:33 +00:00
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub updateschema
{
    # Bring the live database table in line with the current xCAT::Schema
    # definition for this table:
    #   - adds columns that exist in the schema but not in the database
    #   - migrates primary keys when the schema's key set changed
    # Removed attributes are NOT dropped from the database.
    # Returns: $rc (always 0 here; errors are logged via xCAT::MsgUtils).
    #This determines alter table statements required..
    my $self = shift;
    my $descr = $xCAT::Schema::tabspec{$self->{tabname}};
    my $tn = $self->{tabname};
    my $xcatdb2schema;
    my $xcatcfg = get_xcatcfg();
    my $rc = 0;
    my $msg;
    if ($xcatcfg =~ /^DB2:/) { # for DB2, get schema name
        my @parts = split('\|', $xcatcfg);
        $xcatdb2schema = $parts[1];
        $xcatdb2schema =~ tr/a-z/A-Z/;    # convert to upper
    }

    my @columns;    # actual column names found in the database
    my %dbkeys;     # actual primary-key column names found in the database
    if ($self->{backend_type} eq 'sqlite')
    {
        # SQLite has no column_info support here; use PRAGMA table_info.
        my $dbexistq =
            "PRAGMA table_info('$tn')";
        my $sth = $self->{dbh}->prepare($dbexistq);
        $sth->execute;
        my $tn = $self->{tabname};    # NOTE(review): shadows the outer $tn; harmless but redundant
        while (my $col_info = $sth->fetchrow_hashref) {
            #print Dumper($col_info);
            my $tmp_col = $col_info->{name};
            $tmp_col =~ s/"//g;       # strip quoting added by the engine
            push @columns, $tmp_col;
            if ($col_info->{pk}) {    # pk flag marks primary-key members
                $dbkeys{$tmp_col} = 1;
            }
        }
        $sth->finish;
    } else { #Attempt generic dbi..
        #my $sth = $self->{dbh}->column_info('','',$self->{tabname},'');
        my $sth;
        if ($xcatcfg =~ /^DB2:/) { # for DB2
            my $db2table = $self->{tabname};
            $db2table =~ tr/a-z/A-Z/;    # convert to upper for db2
            $sth = $self->{dbh}->column_info(undef, $xcatdb2schema, $db2table, '%');
        } else {
            $sth = $self->{dbh}->column_info(undef, undef, $self->{tabname}, '%');
        }
        while (my $cd = $sth->fetchrow_hashref) {
            #print Dumper($cd);
            push @columns, $cd->{'COLUMN_NAME'};
            #special code for old version of perl-DBD-mysql
            if (defined($cd->{mysql_is_pri_key}) && ($cd->{mysql_is_pri_key} == 1)) {
                my $tmp_col = $cd->{'COLUMN_NAME'};
                $tmp_col =~ s/"//g;
                $dbkeys{$tmp_col} = 1;
            }
        }
        foreach (@columns) { #Column names may end up quoted by database engine
            s/"//g;
        }

        #get primary keys
        if ($xcatcfg =~ /^DB2:/) { # for DB2
            my $db2table = $self->{tabname};
            $db2table =~ tr/a-z/A-Z/;    # convert to upper for db2
            $sth = $self->{dbh}->primary_key_info(undef, $xcatdb2schema, $db2table);
        } else {
            $sth = $self->{dbh}->primary_key_info(undef, undef, $self->{tabname});
        }
        if ($sth) {
            my $data = $sth->fetchall_arrayref;
            #print "data=". Dumper($data);
            foreach my $cd (@$data) {
                my $tmp_col = $cd->[3];    # column 3 of primary_key_info rows is COLUMN_NAME
                $tmp_col =~ s/"//g;
                $dbkeys{$tmp_col} = 1;
            }
        }
    }

    #Now @columns reflects the *actual* columns in the database
    my $dcol;
    my $types = $descr->{types};
    # Add any schema column that is missing from the live table.
    foreach $dcol (@{$self->{colnames}})
    {
        unless (grep /^$dcol$/, @columns)
        {
            #TODO: log/notify of schema upgrade?
            my $datatype;
            if ($xcatcfg =~ /^DB2:/) {
                $datatype = get_datatype_string_db2($dcol, $types, $tn, $descr);
            } else {
                $datatype = get_datatype_string($dcol, $xcatcfg, $types);
            }
            if ($datatype eq "TEXT") {    # keys cannot be TEXT
                if (isAKey(\@{$descr->{keys}}, $dcol)) { # keys
                    $datatype = "VARCHAR(128) ";
                }
            }

            if (grep /^$dcol$/, @{$descr->{required}})
            {
                $datatype .= " NOT NULL";
            }
            # delimit the columns of the table
            my $tmpcol = &delimitcol($dcol);
            my $tablespace;
            my $stmt =
                "ALTER TABLE " . $self->{tabname} . " ADD $tmpcol $datatype";
            $self->{dbh}->do($stmt);
            $msg = "updateschema: Running $stmt";
            xCAT::MsgUtils->message("S", $msg);
            if ($self->{dbh}->errstr) {
                xCAT::MsgUtils->message("S", "Error adding columns for table " . $self->{tabname} . ":" . $self->{dbh}->errstr);
                if ($xcatcfg =~ /^DB2:/) { # see if table space error
                    my $error = $self->{dbh}->errstr;
                    # SQLSTATE 54010: row size exceeds the tablespace page size;
                    # move to a bigger tablespace and retry the ADD COLUMN.
                    if ($error =~ /54010/) {
                        # move the table to the next tablespace
                        if ($error =~ /USERSPACE1/) {
                            $tablespace = "XCATTBS16K";
                        } else {
                            $tablespace = "XCATTBS32K";
                        }
                        my $tablename = $self->{tabname};
                        $tablename =~ tr/a-z/A-Z/;    # convert to upper
                        $msg = "Moving table $self->{tabname} to $tablespace";
                        xCAT::MsgUtils->message("S", $msg);
                        my $stmt2 = "Call sysproc.admin_move_table('XCATDB',\'$tablename\',\'$tablespace\',\'$tablespace\',\'$tablespace\','','','','','','MOVE')";
                        $self->{dbh}->do($stmt2);
                        if ($self->{dbh}->errstr) {
                            xCAT::MsgUtils->message("S", "Error on tablespace move for table " . $self->{tabname} . ":" . $self->{dbh}->errstr);
                        } else { # tablespace move try column add again
                            if (!$self->{dbh}->{AutoCommit}) { # commit tbsp move
                                $self->{dbh}->commit;
                            }
                            $self->{dbh}->do($stmt);
                        }
                    } # if tablespace error
                } # if db2
            } # error on add column
            if (!$self->{dbh}->{AutoCommit}) { # commit add column
                $self->{dbh}->commit;
            }
        }
    }

    #for existing columns that are new keys now
    # note new keys can only be created from existing columns
    # since keys cannot be null, the copy from the backup table will fail if
    # the old value was null.
    my @new_dbkeys = @{$descr->{keys}};
    my @old_dbkeys = keys %dbkeys;
    #print "new_dbkeys=@new_dbkeys; old_dbkeys=@old_dbkeys; columns=@columns\n";
    my $change_keys = 0;
    #Add the new key columns to the table
    foreach my $dbkey (@new_dbkeys) {
        if (!exists($dbkeys{$dbkey})) {
            $change_keys = 1;
            # Call tabdump plugin to create a CSV file
            # can be used in case the restore fails
            # put in /tmp/<tablename.csv.pid>
            my $backuptable = "/tmp/$tn.csv.$$";
            my $cmd = "$::XCATROOT/sbin/tabdump $tn > $backuptable";
            `$cmd`;
            $msg = "updateschema: Backing up table before key change, $cmd";
            xCAT::MsgUtils->message("S", $msg);
            #for mysql, we do not have to recreate table, but we have to make sure the type is correct,
            my $datatype;
            # NOTE(review): non-mysql, non-DB2 backends (e.g. Pg) also fall into
            # the db2 branch here — confirm that is intentional.
            if ($xcatcfg =~ /^mysql:/) {
                $datatype = get_datatype_string($dbkey, $xcatcfg, $types);
            } else { # db2
                $datatype = get_datatype_string_db2($dbkey, $types, $tn, $descr);
            }
            if ($datatype eq "TEXT") {
                if (isAKey(\@{$descr->{keys}}, $dbkey)) { # keys need defined length
                    $datatype = "VARCHAR(128) ";
                }
            }

            # delimit the columns
            my $tmpkey = &delimitcol($dbkey);
            if (($xcatcfg =~ /^DB2:/) || ($xcatcfg =~ /^Pg:/)) {
                # get rid of NOT NULL, cannot modify with NOT NULL
                my ($tmptype, $nullvalue) = split('NOT NULL', $datatype);
                $datatype = $tmptype;
            }
            my $stmt;
            if ($xcatcfg =~ /^DB2:/) {
                $stmt =
                    "ALTER TABLE " . $self->{tabname} . " ALTER COLUMN $tmpkey SET DATA TYPE $datatype";
            } else {
                $stmt =
                    "ALTER TABLE " . $self->{tabname} . " MODIFY COLUMN $tmpkey $datatype";
            }
            $msg = "updateschema: Running $stmt";
            xCAT::MsgUtils->message("S", $msg);
            #print "stmt=$stmt\n";
            $self->{dbh}->do($stmt);
            if ($self->{dbh}->errstr) {
                xCAT::MsgUtils->message("S", "Error changing the keys for table " . $self->{tabname} . ":" . $self->{dbh}->errstr);
            }
        }
    }
    #finally add the new keys
    if ($change_keys) {
        if ($xcatcfg =~ /^mysql:/) { #for mysql, just alter the table
            my $tmp = join(',', @new_dbkeys);
            my $stmt =
                "ALTER TABLE " . $self->{tabname} . " DROP PRIMARY KEY, ADD PRIMARY KEY ($tmp)";
            #print "stmt=$stmt\n";
            $self->{dbh}->do($stmt);
            $msg = "updateschema: Running $stmt";
            xCAT::MsgUtils->message("S", $msg);
            if ($self->{dbh}->errstr) {
                xCAT::MsgUtils->message("S", "Error changing the keys for table " . $self->{tabname} . ":" . $self->{dbh}->errstr);
            }
        } else { #for the rest, recreate the table
            # Strategy: rename live table to <name>_xcatbackup, create a fresh
            # table from the schema, copy the rows back, then drop the backup.
            #print "need to change keys\n";
            my $btn = $tn . "_xcatbackup";
            #remove the backup table just in case;
            # gets error if not there
            #my $str="DROP TABLE $btn";
            #$self->{dbh}->do($str);
            #rename the table name to name_xcatbackup
            my $str;
            if ($xcatcfg =~ /^DB2:/) {
                $str = "RENAME TABLE $tn TO $btn";
            } else {
                $str = "ALTER TABLE $tn RENAME TO $btn";
            }
            $self->{dbh}->do($str);
            if (!$self->{dbh}->{AutoCommit}) {
                $self->{dbh}->commit;
            }
            $msg = "updateschema: Running $str";
            xCAT::MsgUtils->message("S", $msg);
            if ($self->{dbh}->errstr) {
                xCAT::MsgUtils->message("S", "Error renaming the table from $tn to $btn:" . $self->{dbh}->errstr);
            }
            if (!$self->{dbh}->{AutoCommit}) {
                $self->{dbh}->commit;
            }
            #create the table again
            $str =
                buildcreatestmt($tn,
                                $descr,
                                $xcatcfg);
            $self->{dbh}->do($str);
            if ($self->{dbh}->errstr) {
                xCAT::MsgUtils->message("S", "Error recreating table $tn:" . $self->{dbh}->errstr);
            }
            if (!$self->{dbh}->{AutoCommit}) {
                $self->{dbh}->commit;
            }
            #copy the data from backup to the table
            $str = "INSERT INTO $tn SELECT * FROM $btn";
            $self->{dbh}->do($str);
            $msg = "updateschema: Running $str";
            xCAT::MsgUtils->message("S", $msg);
            if ($self->{dbh}->errstr) {
                xCAT::MsgUtils->message("S", "Error copying data from table $btn to $tn:" . $self->{dbh}->errstr);
            } else {
                #drop the backup table only after the copy-back succeeded
                $str = "DROP TABLE $btn";
                $self->{dbh}->do($str);
            }
            if (!$self->{dbh}->{AutoCommit}) {
                $self->{dbh}->commit;
            }
        }
    }
    return $rc;
}
#--------------------------------------------------------------------------
= head3 setNodeAttribs
Description: Set attributes values on the node input to the routine
Arguments:
Table object (invocant)
Node name
Hash reference of attribute column => value pairs to set
Returns:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
my $ mactab = xCAT::Table - > new ( 'mac' , - create = > 1 ) ;
$ mactab - > setNodeAttribs ( $ node , { mac = > $ mac } ) ;
$ mactab - > close ( ) ;
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub setNodeAttribs
{
    my ($self, $node, @rest) = @_;

    # Most tables name their node column 'node', but a table's schema may
    # designate a different column via the 'nodecol' attribute.
    my $nodekey = "node";
    if (defined $xCAT::Schema::tabspec{$self->{tabname}}->{nodecol}) {
        $nodekey = $xCAT::Schema::tabspec{$self->{tabname}}->{nodecol};
    }

    # Delegate to the generic key/value setter with the node as the key.
    return $self->setAttribs({ $nodekey => $node }, @rest);
}
#--------------------------------------------------------------------------
2011-01-06 19:12:45 +00:00
= head3 addNodeAttribs ( not supported )
2007-10-26 22:44:33 +00:00
Description: Add new attributes input to the routine to the nodes
Arguments:
2008-02-21 21:10:35 +00:00
Hash of new attributes
2007-10-26 22:44:33 +00:00
Returns:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub addNodeAttribs
{
    my $self = shift;

    # This interface has been retired: log the attempt, then abort.
    xCAT::MsgUtils->message("S", "addNodeAttribs is not supported");
    die "addNodeAttribs is not supported";

    # Unreachable: retained from the original implementation for reference.
    return $self->addAttribs('node', @_);
}
#--------------------------------------------------------------------------
2011-01-06 19:12:45 +00:00
= head3 addAttribs ( not supported )
2007-10-26 22:44:33 +00:00
Description: add new attributes
2008-02-21 21:10:35 +00:00
Arguments:
2007-10-26 22:44:33 +00:00
Hash: Database Handle , Statement Handle , nodelist
Key name
Key value
Hash reference of column - value pairs to set
Returns:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub addAttribs
{
    # Insert a new row keyed by ($key => $keyval) with the column/value pairs
    # in $elems.  NOTE: this routine is retired -- it logs and dies
    # unconditionally; everything after the die is dead code kept for
    # reference.
    my $self = shift;
    xCAT::MsgUtils->message("S", "addAttribs is not supported");
    die "addAttribs is not supported";
    # ---- unreachable below this point ----
    if ($dbworkerpid) {
        # client mode: forward the call to the db worker process
        return dbc_call($self, 'addAttribs', @_);
    }
    if (not $self->{intransaction} and not $self->{autocommit} and $self->{realautocommit}) {
        #I have realized that this needs some comment work
        #so if we are not in a transaction, *but* the current caller context expects autocommit to be off (i.e. fast performance and rollback
        #however, the DBI layer is set to stick to autocommit on as much as possible (pretty much SQLite) because the single
        #handle is shared, we disable autocommit on that handle until commit or rollback is called
        #yes, that means some table calls coming in with expectation of autocommit during this hopefully short interval
        #could get rolled back along with this transaction, but it's unlikely and moving to a more robust DB solves it
        #so it is intentional that autocommit is left off because it is expected that a commit will come along one day and fix it right up
        #TODO: if caller crashes after inducing a 'begin transaction' without commit or rollback, this could be problematic.
        #calling commit on all table handles if a client goes away uncleanly may be a good enough solution.
        $self->{intransaction} = 1;
        $self->{dbh}->{AutoCommit} = 0;
    }
    my $key = shift;
    my $keyval = shift;
    my $elems = shift;    # hash ref of column => value (or => [value, ...])
    my $cols = "";
    my @bind = ();
    @bind = ($keyval);
    $cols = "$key,";
    # Build the column list and bind values; array refs contribute their
    # first element only.
    for my $col (keys %$elems)
    {
        $cols = $cols . $col . ",";
        if (ref($$elems{$col}))
        {
            push @bind, ${$elems}{$col}->[0];
        }
        else
        {
            push @bind, $$elems{$col};
        }
    }
    chop($cols);    # drop trailing comma
    my $qstring = 'INSERT INTO ' . $self->{tabname} . " ($cols) VALUES (";
    for (@bind)
    {
        $qstring = $qstring . "?,";
    }
    $qstring =~ s/,$/)/;    # replace last comma with closing paren
    my $sth = $self->{dbh}->prepare($qstring);
    $sth->execute(@bind);
    #$self->{dbh}->commit;
    #notify the interested parties
    my $notif = xCAT::NotifHandler->needToNotify($self->{tabname}, 'a');
    if ($notif == 1)
    {
        my %new_notif_data;
        $new_notif_data{$key} = $keyval;
        foreach (keys %$elems)
        {
            $new_notif_data{$_} = $$elems{$_};
        }
        xCAT::NotifHandler->notify("a", $self->{tabname}, [0],
                                   \%new_notif_data);
    }
    $sth->finish();
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head3 rollback
2007-10-26 22:44:33 +00:00
Description: rollback changes
Arguments:
Database Handle
Returns:
2008-02-21 21:10:35 +00:00
none
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
my $ tab = xCAT::Table - > new ( $ table , - create = > 1 , - autocommit = > 0 ) ;
2008-02-21 21:10:35 +00:00
$ tab - > rollback ( ) ;
2007-10-26 22:44:33 +00:00
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub rollback
{
    my $self = shift;

    # In client mode, forward the request to the db worker process.
    return dbc_call($self, 'rollback', @_) if $dbworkerpid;

    $self->{dbh}->rollback;

    # When a DB handle shared between autocommit and non-autocommit callers
    # had been switched into a transaction, restore autocommit now that the
    # transaction has ended.
    if ($self->{intransaction} && !$self->{autocommit} && $self->{realautocommit}) {
        $self->{intransaction} = 0;
        $self->{dbh}->{AutoCommit} = 1;
    }
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head3 commit
2007-10-26 22:44:33 +00:00
Description:
Commit changes
Arguments:
Database Handle
Returns:
2008-02-21 21:10:35 +00:00
none
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
my $ tab = xCAT::Table - > new ( $ table , - create = > 1 , - autocommit = > 0 ) ;
2008-02-21 21:10:35 +00:00
$ tab - > commit ( ) ;
2007-10-26 22:44:33 +00:00
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub commit
{
    my $self = shift;

    # In client mode, forward the request to the db worker process.
    return dbc_call($self, 'commit', @_) if $dbworkerpid;

    # Only commit when the handle is actually in a transaction; callers may
    # invoke commit() freely even when it is a no-op.
    if (!$self->{dbh}->{AutoCommit}) {
        $self->{dbh}->commit;
    }

    # If realautocommit marks a DBH shared between manual and auto commit
    # callers, return the handle to autocommit once the transaction ends.
    if ($self->{intransaction} && !$self->{autocommit} && $self->{realautocommit}) {
        $self->{intransaction} = 0;
        $self->{dbh}->{AutoCommit} = 1;
    }
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head3 setAttribs
2007-10-26 22:44:33 +00:00
Description:
Arguments:
Key name
Key value
Hash reference of column - value pairs to set
Returns:
2008-02-21 21:10:35 +00:00
None
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
my $ tab = xCAT::Table - > new ( 'ppc' , - create = > 1 , - autocommit = > 0 ) ;
$ keyhash { 'node' } = $ name ;
$ updates { 'type' } = lc ( $ type ) ;
$ updates { 'id' } = $ lparid ;
$ updates { 'hcp' } = $ server ;
$ updates { 'profile' } = $ prof ;
$ updates { 'frame' } = $ frame ;
$ updates { 'mtms' } = "$model*$serial" ;
$ tab - > setAttribs ( \ % keyhash , \ % updates ) ;
$ tab - > commit ;
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub setAttribs
{
    # Set column values on the row(s) matching the given key pairs.
    # Arguments:
    #   $pKeypairs - hash ref of key column => value pairs selecting the row
    #                (undef or empty forces an insert)
    #   $elems     - hash ref of column => value pairs to set
    # Returns 0 on success, or (undef, $errstr) on a DB error.
    # Side effects: refreshes the table cache and fires table-change
    # notifications via xCAT::NotifHandler.
    #
    # FIX: the original compared "$pKeypairs != undef", a numeric comparison
    # that coerces the reference to a number and warns under 'use warnings';
    # replaced with the correct "defined $pKeypairs" test.
    my $xcatcfg = get_xcatcfg();
    my $self = shift;
    if ($dbworkerpid) {
        # client mode: forward the call to the db worker process
        return dbc_call($self, 'setAttribs', @_);
    }
    my $pKeypairs = shift;
    my %keypairs = ();
    if (defined $pKeypairs) { %keypairs = %{$pKeypairs}; }
    my $elems = shift;
    my $cols = "";
    my @bind = ();
    my $action;        # "u" = update existing row, "a" = add (insert)
    my @notif_data;
    my $qstring;
    $qstring = "SELECT * FROM " . $self->{tabname} . " WHERE ";
    my @qargs = ();
    my $query;
    my $data;
    if (not $self->{intransaction} and not $self->{autocommit} and $self->{realautocommit}) {
        # Shared-handle case (see the matching block elsewhere in this file):
        # temporarily turn off autocommit until commit/rollback is called.
        $self->{intransaction} = 1;
        $self->{dbh}->{AutoCommit} = 0;
    }
    # Probe for an existing row to decide between UPDATE and INSERT.
    if ((defined $pKeypairs) && (keys(%keypairs) > 0)) {
        foreach (keys %keypairs)
        {
            # delimit the columns of the table
            my $delimitedcol = &delimitcol($_);
            if ($xcatcfg =~ /^DB2:/) { # DB2 requires LIKE for these matches
                $qstring .= $delimitedcol . " LIKE ? AND ";
            } else {
                $qstring .= $delimitedcol . " = ? AND ";
            }
            push @qargs, $keypairs{$_};
        }
        $qstring =~ s/ AND \z//;    # strip the trailing AND
        $query = $self->{dbh}->prepare($qstring);
        $query->execute(@qargs);
        #get the first row
        $data = $query->fetchrow_arrayref();
        if (defined $data)
        {
            $action = "u";
        }
        else
        {
            $action = "a";
        }
    } else { $action = "a"; }

    #prepare the notification data
    my $notif =
        xCAT::NotifHandler->needToNotify($self->{tabname}, $action);
    if ($notif == 1)
    {
        if ($action eq "u")
        {
            #put the column names at the very front
            push(@notif_data, $query->{NAME});
            #copy the data out because fetchall_arrayref overrides the data.
            my @first_row = @$data;
            push(@notif_data, \@first_row);
            #get the rest of the rows
            my $temp_data = $query->fetchall_arrayref();
            foreach (@$temp_data)
            {
                push(@notif_data, $_);
            }
        }
    }
    if ($query) {
        $query->finish();
    }
    if ($action eq "u")
    {
        #update the rows
        $action = "u";
        for my $col (keys %$elems)
        {
            # delimit the columns of the table
            my $delimitedcol = &delimitcol($col);
            $cols = $cols . $delimitedcol . " = ?,";
            # a value containing "NULL" stores SQL NULL
            push @bind, (($$elems{$col} =~ /NULL/) ? undef : $$elems{$col});
        }
        chop($cols);
        my $cmd;
        $cmd = "UPDATE " . $self->{tabname} . " set $cols where ";
        foreach (keys %keypairs)
        {
            if (ref($keypairs{$_}))
            {
                # delimit the columns; array-ref keys use their first element
                my $delimitedcol = &delimitcol($_);
                $cmd .= $delimitedcol . " = '" . $keypairs{$_}->[0] . "' AND ";
            }
            else
            {
                my $delimitedcol = &delimitcol($_);
                $cmd .= $delimitedcol . " = '" . $keypairs{$_} . "' AND ";
            }
        }
        $cmd =~ s/ AND \z//;
        my $sth = $self->{dbh}->prepare($cmd);
        unless ($sth) {
            return (undef, "Error attempting requested DB operation");
        }
        my $err = $sth->execute(@bind);
        if (not defined($err))
        {
            return (undef, $sth->errstr);
        }
        $sth->finish;
    }
    else
    {
        #insert the rows
        $action = "a";
        @bind = ();
        $cols = "";
        my %newpairs;
        #first, merge the two structures to a single hash
        foreach (keys %keypairs)
        {
            $newpairs{$_} = $keypairs{$_};
        }
        my $needinsert = 0;
        for my $col (keys %$elems)
        {
            $newpairs{$col} = $$elems{$col};
            if (defined $newpairs{$col} and not $newpairs{$col} eq "") {
                $needinsert = 1;
            }
        }
        unless ($needinsert) { #Don't bother inserting truly blank lines
            return;
        }
        foreach (keys %newpairs) {
            my $delimitedcol = &delimitcol($_);
            $cols .= $delimitedcol . ",";
            push @bind, $newpairs{$_};
        }
        chop($cols);
        my $qstring = 'INSERT INTO ' . $self->{tabname} . " ($cols) VALUES (";
        for (@bind)
        {
            $qstring = $qstring . "?,";
        }
        $qstring =~ s/,$/)/;
        my $sth = $self->{dbh}->prepare($qstring);
        my $err = $sth->execute(@bind);
        if (not defined($err))
        {
            return (undef, $sth->errstr);
        }
        $sth->finish;
    }
    $self->_refresh_cache(); #cache is invalid, refresh
    #notify the interested parties
    if ($notif == 1)
    {
        #create new data ref
        my %new_notif_data = %keypairs;
        foreach (keys %$elems)
        {
            $new_notif_data{$_} = $$elems{$_};
        }
        xCAT::NotifHandler->notify($action, $self->{tabname},
                                   \@notif_data, \%new_notif_data);
    }
    return 0;
}
2007-11-28 19:44:47 +00:00
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head3 setAttribsWhere
2007-11-28 19:44:47 +00:00
Description:
This function sets the attributes for the rows selected by the where clause .
2010-01-19 19:34:46 +00:00
Warning: because we support multiple databases (SQLite, MySQL and DB2) that
require different syntax, any code using this routine must call the
2010-04-06 13:32:23 +00:00
Utils - > getDBName routine and code the where clause that is appropriate for
2010-01-19 19:34:46 +00:00
each supported database .
2007-11-28 19:44:47 +00:00
Arguments:
Where clause .
2009-12-01 19:26:56 +00:00
Note: if the WHERE clause contains any reserved keywords, such as
keys from the site table, then you will have to quote them in backticks
for MySQL, but not for PostgreSQL.
2007-11-28 19:44:47 +00:00
Hash reference of column - value pairs to set
Returns:
2008-02-21 21:10:35 +00:00
None
Globals:
Error:
2007-11-28 19:44:47 +00:00
Example:
my $ tab = xCAT::Table - > new ( 'ppc' , - create = > 1 , - autocommit = > 1 ) ;
$ updates { 'type' } = lc ( $ type ) ;
$ updates { 'id' } = $ lparid ;
$ updates { 'hcp' } = $ server ;
$ updates { 'profile' } = $ prof ;
$ updates { 'frame' } = $ frame ;
$ updates { 'mtms' } = "$model*$serial" ;
2009-12-01 19:26:56 +00:00
$ tab - > setAttribsWhere ( "node in ('node1', 'node2', 'node3')" , \ % updates ) ;
2007-11-28 19:44:47 +00:00
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub setAttribsWhere
{
    # Set attributes on every row matched by a caller-supplied WHERE clause.
    #
    # Arguments:
    #   $where_clause - raw SQL WHERE clause text; the caller must use syntax
    #                   appropriate for the backing database (see POD above)
    #   $elems        - hash reference of column => value pairs to set
    # Returns:
    #   0 on success
    #   (0, "no rows selected.") when the clause matched nothing
    #   (undef, $errstr) on a database error
    my $self = shift;
    if ($dbworkerpid) {
        # Proxy through the db worker process when one is active.
        return dbc_call($self, 'setAttribsWhere', @_);
    }
    my $where_clause = shift;
    my $elems        = shift;
    my $cols         = "";
    my @bind         = ();
    my $action;
    my @notif_data;

    if (not $self->{intransaction} and not $self->{autocommit} and $self->{realautocommit}) {

        # search this code for the other if statement just like it for an
        # explanation of why I do this
        $self->{intransaction} = 1;
        $self->{dbh}->{AutoCommit} = 0;
    }

    # Select the rows that are about to change so interested parties can be
    # notified with the pre-update data.
    my $qstring = "SELECT * FROM " . $self->{tabname} . " WHERE " . $where_clause;
    my @qargs   = ();
    my $query   = $self->{dbh}->prepare($qstring);
    $query->execute(@qargs);

    # Get the first row; if there is none, there is nothing to update.
    my $data = $query->fetchrow_arrayref();
    if (defined $data) { $action = "u"; }
    else {
        # FIX: release the statement handle before the early return; the
        # original leaked an unfinished active statement here.
        $query->finish();
        return (0, "no rows selected.");
    }

    # Prepare the notification data.
    my $notif =
      xCAT::NotifHandler->needToNotify($self->{tabname}, $action);
    if ($notif == 1)
    {
        # Put the column names at the very front.
        push(@notif_data, $query->{NAME});

        # Copy the data out because fetchall_arrayref overrides the data.
        my @first_row = @$data;
        push(@notif_data, \@first_row);

        # Get the rest of the rows.
        my $temp_data = $query->fetchall_arrayref();
        foreach (@$temp_data) {
            push(@notif_data, $_);
        }
    }
    $query->finish();

    # Update the rows.
    for my $col (keys %$elems)
    {
        # Delimit the column names to protect reserved DB keywords.
        my $delimitedcol = &delimitcol($col);
        $cols = $cols . $delimitedcol . " = ?,";

        # The literal string "NULL" (anywhere in the value) is translated to
        # SQL NULL via an undef bind value.
        push @bind, (($$elems{$col} =~ /NULL/) ? undef : $$elems{$col});
    }
    chop($cols);    # drop the trailing comma
    my $cmd = "UPDATE " . $self->{tabname} . " set $cols where " . $where_clause;
    my $sth = $self->{dbh}->prepare($cmd);
    my $err = $sth->execute(@bind);
    if (not defined($err))
    {
        return (undef, $sth->errstr);
    }

    # Notify the interested parties.
    if ($notif == 1)
    {
        # Create new data ref.
        my %new_notif_data = ();
        foreach (keys %$elems)
        {
            $new_notif_data{$_} = $$elems{$_};
        }
        xCAT::NotifHandler->notify($action, $self->{tabname},
            \@notif_data, \%new_notif_data);
    }
    $sth->finish;
    return 0;
}
2008-07-11 19:12:05 +00:00
#--------------------------------------------------------------------------
= head3 setNodesAttribs
Description: Unconditionally assigns the requested values to tables for a list of nodes
Arguments:
'self' ( implicit in OO style call )
2009-11-26 18:01:38 +00:00
A reference to a two - level hash similar to:
{
'n1' = > {
comments = > 'foo' ,
data = > 'foo2'
} ,
'n2' = > {
comments = > 'bar' ,
data = > 'bar2'
}
}
Alternative arguments ( same set of data to be applied to multiple nodes ) :
'self'
2008-07-11 19:12:05 +00:00
Reference to a list of nodes ( no noderanges , just nodes )
A hash of attributes to set , like in 'setNodeAttribs'
Returns:
= cut
#--------------------------------------------------------------------------
sub setNodesAttribs {

    # Unconditionally assign the requested values for a list of nodes.
    # Two calling conventions (see POD above):
    #   $self->setNodesAttribs({ n1 => { col => val, ... }, n2 => {...} })
    #   $self->setNodesAttribs(\@nodes, { col => val, ... })
    #
    # Strategy for scale:
    #  -Query the table and divide nodes into a list to update and a list to insert
    #  -Update intelligently with respect to scale
    #  -Insert intelligently with respect to scale (prepare one statement, execute
    #   many times; other syntaxes are not universal)
    # "Intelligently" means folding statements to a degree, capped to avoid
    # exceeding SQL statement length / bind variable restrictions on some DBs.
    my $self = shift;
    if ($dbworkerpid) {
        return dbc_call($self, 'setNodesAttribs', @_);
    }
    my $nodelist    = shift;
    my $keyset      = shift;
    my %cols        = ();
    my @orderedcols = ();
    my $oldac = $self->{dbh}->{AutoCommit};    # save autocommit state
    $self->{dbh}->{AutoCommit} = 0;            # turn off autocommit for performance
    my $hashrec;
    my $colsmatch = 1;
    if (ref $nodelist eq 'HASH') {    # argument of the form { n001 => { groups => 'something' }, ... }
        $hashrec = $nodelist;
        my @nodes = keys %$nodelist;
        $nodelist = \@nodes;
        my $firstpass = 1;

        # Determine whether the passed structure sets the same columns for
        # every node, to decide if the fast path can be used.
        foreach my $node (keys %$hashrec) {
            if ($firstpass) {
                $firstpass = 0;
                foreach (keys %{ $hashrec->{$node} }) {
                    $cols{$_} = 1;
                }
            } else {
                # Make sure all columns in this entry are in the first...
                foreach (keys %{ $hashrec->{$node} }) {
                    unless (defined $cols{$_}) {
                        $colsmatch = 0;
                        last;
                    }
                }
                # ...and that this entry lacks no columns from the first.
                foreach my $col (keys %cols) {
                    unless (defined $hashrec->{$node}->{$col}) {
                        $colsmatch = 0;
                        last;
                    }
                }
            }
        }
    } else {    # legacy calling style: list reference plus a single col=>value hash
        $hashrec = {};
        foreach (@$nodelist) {
            $hashrec->{$_} = $keyset;
        }
        foreach (keys %$keyset) {
            $cols{$_} = 1;
        }
    }

    # Revert to the slow per-node path if notification is required or an
    # asymmetric setNodesAttribs was requested with different columns for
    # different nodes.
    if (not $colsmatch or xCAT::NotifHandler->needToNotify($self->{tabname}, 'u') or xCAT::NotifHandler->needToNotify($self->{tabname}, 'a')) {

        # TODO: enhance performance of this case too; for now just call the
        # notification-capable code per node.
        foreach (keys %$hashrec) {
            $self->setNodeAttribs($_, $hashrec->{$_});
        }
        $self->{dbh}->commit;                  # commit pending transactions
        $self->{dbh}->{AutoCommit} = $oldac;   # restore autocommit semantics
        return;
    }

    # This code path is notification-incapable. It enhances scaled setting by:
    #  -turning off autocommit if on (done for the code above too)
    #  -executing one select statement per chunk of nodes instead of per node
    #  -preparing one update and one insert statement and re-executing them
    #   (SQL-92 multi-row insert isn't ubiquitous enough)
    my $nodekey = "node";
    if (defined $xCAT::Schema::tabspec{ $self->{tabname} }->{nodecol}) {
        $nodekey = $xCAT::Schema::tabspec{ $self->{tabname} }->{nodecol}
    };
    @orderedcols = keys %cols;    # pick a specific column ordering explicitly to assure consistency
    my @sqlorderedcols = ();

    # Must quote to protect from reserved DB keywords.
    foreach my $col (@orderedcols) {
        my $delimitedcol = &delimitcol($col);
        push @sqlorderedcols, $delimitedcol;
    }

    # The update case statement will consume '?' of which we are allowed 999
    # in the most restricted DB we support.
    my $nodesatatime = 999;

    # Do a few at a time to stay under max SQL statement length and max
    # variable count.
    my @currnodes = splice(@$nodelist, 0, $nodesatatime);
    my $insertsth;    # if insert is needed, holds the single prepared insert statement
    my $upsth;        # likewise for update
    my $dnodekey = &delimitcol($nodekey);
    while (scalar @currnodes) {
        my %updatenodes = ();
        my %insertnodes = ();
        my $qstring;

        # Sort nodes into inserts and updates.
        $qstring = "SELECT * FROM " . $self->{tabname} . " WHERE $dnodekey in (";
        $qstring .= '?, ' x scalar(@currnodes);
        $qstring =~ s/, $/)/;
        my $query = $self->{dbh}->prepare($qstring);
        $query->execute(@currnodes);
        my $rec;
        while ($rec = $query->fetchrow_hashref()) {
            $updatenodes{ $rec->{$nodekey} } = 1;
        }
        if (scalar keys %updatenodes < scalar @currnodes) {
            foreach (@currnodes) {
                unless ($updatenodes{$_}) {
                    $insertnodes{$_} = 1;
                }
            }
        }

        # Whether to put node first in execute arguments or let it go naturally.
        my $havenodecol;
        if (not $insertsth and keys %insertnodes) {    # prepare an insert statement since one will be needed
            my $columns   = "";
            my $bindhooks = "";
            $havenodecol = defined $cols{$nodekey};
            unless ($havenodecol) {
                $columns   = "$dnodekey, ";
                $bindhooks = "?, ";
            }
            $columns   .= join(", ", @sqlorderedcols);
            $bindhooks .= "?, " x scalar @sqlorderedcols;
            $bindhooks =~ s/, $//;
            $columns =~ s/, $//;
            my $instring = "INSERT INTO " . $self->{tabname} . " ($columns) VALUES ($bindhooks)";
            $insertsth = $self->{dbh}->prepare($instring);
        }
        foreach my $node (keys %insertnodes) {
            my @args = ();
            unless ($havenodecol) {
                @args = ($node);
            }
            foreach my $col (@orderedcols) {
                push @args, $hashrec->{$node}->{$col};
            }
            $insertsth->execute(@args);
        }

        # FIX: comment previously said "insert" here; this prepares the
        # single reusable UPDATE statement.
        if (not $upsth and keys %updatenodes) {
            my $upstring = "UPDATE " . $self->{tabname} . " set ";
            foreach my $col (@sqlorderedcols) {
                $upstring .= "$col = ?, ";
            }
            if (grep { $_ eq $nodekey } @orderedcols) {
                $upstring =~ s/, \z//;
            } else {
                $upstring =~ s/, \z/ where $dnodekey = ?/;
            }
            $upsth = $self->{dbh}->prepare($upstring);
        }
        if (scalar keys %updatenodes) {
            # FIX: removed a dead "my $upstring = ..." assignment that was
            # recomputed here on every chunk and never used.
            foreach my $node (keys %updatenodes) {
                my @args = ();
                foreach my $col (@orderedcols) {
                    push @args, $hashrec->{$node}->{$col};
                }
                push @args, $node;
                $upsth->execute(@args);
            }
        }
        @currnodes = splice(@$nodelist, 0, $nodesatatime);
    }
    $self->{dbh}->commit;                  # commit pending transactions
    $self->{dbh}->{AutoCommit} = $oldac;   # restore autocommit semantics
    $self->_refresh_cache();               # cache is invalid, refresh
}
2007-11-28 19:44:47 +00:00
2008-06-30 13:51:44 +00:00
#--------------------------------------------------------------------------
= head3 getNodesAttribs
Description: Retrieves the requested attributes for a node list
Arguments:
Table handle ( 'self' )
List ref of nodes
Attribute type array
Returns:
two layer hash reference ( - > { nodename } - > { attrib }
Globals:
Error:
Example:
my $ ostab = xCAT::Table - > new ( 'nodetype' ) ;
my $ ent = $ ostab - > getNodesAttribs ( \ @ nodes , [ 'profile' , 'os' , 'arch' ] ) ;
if ( $ ent ) { print $ ent - > { n1 } - > { profile }
Comments:
Using this function will clue the table layer into the atomic nature of the request , and allow shortcuts to be taken as appropriate to fulfill the request at scale .
= cut
#--------------------------------------------------------------------------------
sub getNodesAttribs {

    # Retrieve the requested attributes for a list of nodes, returning a
    # hash reference of nodename => [ entry, ... ].  For large node lists
    # (above $cachethreshold) the table and nodelist caches are primed first
    # so per-node lookups become cheap.
    my $self = shift;
    if ($dbworkerpid) {
        return dbc_call($self, 'getNodesAttribs', @_);
    }
    my $nodelist = shift;
    unless ($nodelist) { $nodelist = []; }    # common to be invoked with undef seemingly
    my %options = ();
    my @attribs;
    if (ref $_[0]) {
        @attribs = @{ shift() };
        %options = @_;
    } else {
        @attribs = @_;
    }

    # Keep the caller's attribute list verbatim: the cached columns may grow
    # into a superset and we must not return more than was asked for.
    my @realattribs = @attribs;

    # The cache will also be used if it is already in play even below the
    # threshold; that is desired behavior.
    if (scalar(@$nodelist) > $cachethreshold) {
        $self->{_use_cache} = 0;
        $self->{nodelist}->{_use_cache} = 0;
        if ($self->{tabname} eq 'nodelist') {    # a sticky situation
            my @cacheattribs = @attribs;
            push @cacheattribs, 'node'   unless grep(/^node$/,   @cacheattribs);
            push @cacheattribs, 'groups' unless grep(/^groups$/, @cacheattribs);
            $self->_build_cache(\@cacheattribs);
        } else {
            $self->_build_cache(\@attribs);
            $self->{nodelist}->_build_cache([ 'node', 'groups' ]);
        }
        $self->{_use_cache} = 1;
        $self->{nodelist}->{_use_cache} = 1;
    }

    # Answer each node from the (possibly primed) cache.
    my $rethash;
    foreach my $nodename (@$nodelist) {
        my @nodeentries = $self->getNodeAttribs($nodename, \@realattribs, %options);
        $rethash->{$nodename} = \@nodeentries;
    }
    $self->{_use_cache} = 0;
    if ($self->{tabname} ne 'nodelist') {
        $self->{nodelist}->{_use_cache} = 0;
    }
    return $rethash;
}
2010-02-19 14:45:49 +00:00
sub _refresh_cache {

    # If a cache exists, force a rebuild, leaving reference counts alone.
    # dbworker check not currently required.
    my $self = shift;

    # Invalidate the peer table object's cache stamp as well, if it has one.
    $self->{cachepeer}->{_cachestamp} = 0 if $self->{cachepeer}->{_cachestamp};
    if ($self->{_use_cache}) {

        # Cache is set up: rebuild the whole thing for now.  A faster
        # incremental update may be possible in the future, but the payoff
        # may not be worth it, as this case is rare.  The only known case
        # that trips over this is:
        #   1st noderange starts being expanded
        #   the nodelist is updated by another process
        #   2nd noderange starts being expanded (sharing first cache)
        #     (uses stale nodelist data and misses new nodes, the error)
        #   1st noderange finishes
        #   2nd noderange finishes
        $self->_build_cache(1);
    } else {

        # Even if a cache is not in use *right this second*, mark any cached
        # data that may exist as invalid by resetting its stamp to 0 (1970).
        $self->{_cachestamp} = 0 if $self->{_cachestamp};
    }
    return;
}
2008-06-30 20:51:41 +00:00
sub _build_cache {    # PRIVATE FUNCTION, PLEASE DON'T CALL DIRECTLY

    # Populate $self->{_tablecache} / $self->{_nodecache} with the rows for
    # the given attribute list.  Passing a non-reference argument means
    # "refresh the existing cache using the previously cached attributes".
    #
    # TODO: increment a reference counter type thing to preserve current cache.
    # Also, if ref count is 1 or greater and the current cache is young,
    # reuse the cache?
    my $self = shift;
    if ($dbworkerpid) {
        return dbc_call($self, '_build_cache', @_);
    }
    my $attriblist = shift;
    my %copts = @_;    # options (e.g. noincrementref) accepted but not read here

    # A non-reference argument signals a refresh request.
    my $refresh = not ref $attriblist;
    if (not ref $attriblist) {
        # Need $attriblist to mean something for the rebuild.
        $attriblist = $self->{_cached_attriblist};
    }
    if (not $refresh and $self->{_cache_ref}) {    # an active cache reference exists

        # If any requested attribute is not already cached, we must rebuild.
        my $cacheok = 1;
        foreach my $wanted (@$attriblist) {
            unless (grep { $wanted eq $_ } @{ $self->{_cached_attriblist} }) {
                $cacheok = 0;
                last;
            }
        }

        # NEVER use a cache older than 5 seconds.
        if ($self->{_cachestamp} < (time() - 5)) {
            $cacheok = 0;
        }
        return if $cacheok;

        # Cache is insufficient; do the converse of the check above and fold
        # any currently cached columns into the new list so nothing is lost.
        foreach my $cached (@{ $self->{_cached_attriblist} }) {
            unless (grep { $cached eq $_ } @$attriblist) {
                push @$attriblist, $cached;
            }
        }
    }

    # If here, _cache_ref indicates no cache.
    if (not $refresh and not $self->{_cache_ref}) {
        $self->{_cache_ref} = 1;
    }
    my $oldusecache = $self->{_use_cache};    # save previous 'use_cache' setting
    $self->{_use_cache} = 0;                  # this function must disable cache to function

    # The node column may be named something other than 'node' per the schema.
    my $nodekey = "node";
    if (defined $xCAT::Schema::tabspec{ $self->{tabname} }->{nodecol}) {
        $nodekey = $xCAT::Schema::tabspec{ $self->{tabname} }->{nodecol}
    };
    unless (grep /^$nodekey$/, @$attriblist) {
        push @$attriblist, $nodekey;
    }
    my @tabcache = $self->getAllAttribs(@$attriblist);
    $self->{_tablecache} = \@tabcache;
    $self->{_nodecache}  = {};

    # Index rows by node name when the table actually has node data.
    if ($tabcache[0]->{$nodekey}) {
        foreach (@tabcache) {
            push @{ $self->{_nodecache}->{ $_->{$nodekey} } }, $_;
        }
    }
    $self->{_cached_attriblist} = $attriblist;
    $self->{_use_cache}  = $oldusecache;    # restore setting to previous value
    $self->{_cachestamp} = time;
}
2010-08-24 20:46:22 +00:00
# Utility: derive a number from a string by summing its byte values; useful
# for things like round-robin algorithms on unnumbered nodes.
sub mknum {
    my $string = shift;
    my $total  = 0;

    # Big-endian byte walk, so 'fred' and 'free' end up one number apart.
    $total += $_ for unpack("C*", $string);
    return $total;
}
# Expose mknum inside the Safe compartment used for |...| expression evaluation.
$evalcpt->share('&mknum');

# NOTE(review): 'require' is permitted in the compartment, presumably so that
# reval'd expressions such as 'use integer; ...' can compile -- confirm.
$evalcpt->permit('require');
2007-10-26 22:44:33 +00:00
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head3 getNodeAttribs
2007-10-26 22:44:33 +00:00
2008-02-21 21:10:35 +00:00
Description: Retrieves the requested attribute
2007-10-26 22:44:33 +00:00
Arguments:
Table handle
Noderange
2008-02-21 21:10:35 +00:00
Attribute type array
2007-10-26 22:44:33 +00:00
Returns:
2008-02-21 21:10:35 +00:00
Attribute hash ( key attribute type )
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
my $ ostab = xCAT::Table - > new ( 'nodetype' ) ;
2008-02-21 21:10:35 +00:00
my $ ent = $ ostab - > getNodeAttribs ( $ node , [ 'profile' , 'os' , 'arch' ] ) ;
2007-10-26 22:44:33 +00:00
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub getNodeAttribs
{
    # Retrieve the requested attributes for one node, applying xCAT's
    # regular-expression substitution syntax to each value:
    #   /pat/replacement/  - plain regex substitution against the node name
    #   |pat|replacement|  - like above, but parenthesized parts of the
    #                        replacement are arithmetically evaluated in a
    #                        Safe compartment
    # Returns the list of matching entries in list context, the first entry
    # in scalar context, or undef when nothing matched.
    my $self = shift;
    if ($dbworkerpid) {
        # TODO: should this be moved outside of the DB worker entirely?  The
        # benefit would be that the potentially computationally intensive
        # substitution logic would spend less time inside limited db worker
        # scope.
        return dbc_call($self, 'getNodeAttribs', @_);
    }
    my $node = shift;
    my @attribs;
    my %options = ();
    if (ref $_[0]) {
        @attribs = @{ shift() };
        %options = @_;
    } else {
        @attribs = @_;
    }
    my $datum;
    my $oldusecache;
    my $nloldusecache;
    if ($options{prefetchcache}) {
        # TODO: if this were split out of the DB worker, this logic would
        # have to move into returnany.
        if ($self->{tabname} eq 'nodelist') {    # a sticky situation
            my @locattribs = @attribs;
            unless (grep(/^node$/, @locattribs)) {
                push @locattribs, 'node';
            }
            unless (grep(/^groups$/, @locattribs)) {
                push @locattribs, 'groups';
            }
            $self->_build_cache(\@locattribs, noincrementref => 1);
        } else {
            $self->_build_cache(\@attribs, noincrementref => 1);
            $self->{nodelist}->_build_cache([ 'node', 'groups' ], noincrementref => 1);
        }
        $oldusecache   = $self->{_use_cache};
        $nloldusecache = $self->{nodelist}->{_use_cache};
        $self->{_use_cache} = 1;
        $self->{nodelist}->{_use_cache} = 1;
    }
    my @data = $self->getNodeAttribs_nosub_returnany($node, \@attribs, %options);
    if ($options{prefetchcache}) {
        $self->{_use_cache} = $oldusecache;
        $self->{nodelist}->{_use_cache} = $nloldusecache;

        # In this case we just let the cache live, even if it is to be
        # ignored by most invocations.
    }
    defined($data[0])
      || return undef;    # no matching entry found in configuration
    unless (scalar keys %{ $data[0] }) {
        return undef;
    }
    my $attrib;
    foreach $datum (@data) {
        foreach $attrib (@attribs)
        {
            # Skip undefined values, save time.
            next unless defined $datum->{$attrib};

            if ($datum->{$attrib} =~ /^\/[^\/]*\/[^\/]*\/$/)
            {
                # Plain /pattern/replacement/ substitution on the node name.
                my $exp = substr($datum->{$attrib}, 1);
                chop $exp;
                my @parts  = split('/', $exp, 2);
                my $retval = $node;
                $retval =~ s/$parts[0]/$parts[1]/;
                $datum->{$attrib} = $retval;
            }
            elsif ($datum->{$attrib} =~ /^\|.*\|$/)
            {
                # Perform arithmetic and only arithmetic operations in
                # bracketed expressions on the right.  Tricky part: don't
                # allow potentially dangerous code; the Safe compartment
                # ($evalcpt) guards the eval.
                my $exp = substr($datum->{$attrib}, 1);
                chop $exp;
                my @parts = split('\|', $exp, 2);
                if (@parts < 2) {
                    # Easy regex form: generate the left-hand side from the
                    # numeric runs in the node name.
                    my @numbers = $node =~ m/[\D0]*(\d+)/g;
                    my $lhs     = '[\D0]*(\d+)' x scalar(@numbers);
                    $lhs .= '.*$';
                    unshift(@parts, $lhs);
                }
                my $curr;
                my $next;
                my $prev;
                my $retval = $parts[1];
                ($curr, $next, $prev) =
                  extract_bracketed($retval, '()', qr/[^()]*/);
                unless ($curr) {
                    # No parenthesized parameters to evaluate: treat this one
                    # like a plain regex.  extract_bracketed would have set
                    # $@ if it didn't return, so clear it.
                    undef $@;
                    $retval = $node;
                    $retval =~ s/$parts[0]/$parts[1]/;
                    $datum->{$attrib} = $retval;
                    if ($datum->{$attrib} =~ /^$/) {
                        # If the regex forces a blank, act like a normal
                        # blank does.
                        delete $datum->{$attrib};
                    }
                    next;    # skip the redundancy that follows otherwise
                }
                while ($curr)
                {
                    # Substitute the captured groups into the bracketed
                    # expression, then arithmetically evaluate it inside the
                    # Safe compartment.
                    my $value = $node;
                    $value =~ s/$parts[0]/$curr/;
                    $value  = $evalcpt->reval('use integer;' . $value);
                    $retval = $prev . $value . $next;
                    ($curr, $next, $prev) =
                      extract_bracketed($retval, '()', qr/[^()]*/);
                }
                undef $@;

                # At this point $retval is the expression after being
                # arithmetically contemplated -- a generated regex -- and
                # therefore must be applied in total.
                my $answval = $node;
                $answval =~ s/$parts[0]/$retval/;
                $datum->{$attrib} = $answval;
            }
            if ($datum->{$attrib} =~ /^$/) {
                # If the regex forces a blank, act like a normal blank does.
                delete $datum->{$attrib};
            }
        }
    }
    return wantarray ? @data : $data[0];
}
2011-12-30 07:44:19 +00:00
#--------------------------------------------------------------------------
= head3 getNodeSpecAttribs
Description: Retrieves the requested attributes which matching the specified options for a node
Arguments:
Noderange
The specified options
List of attributes
Return:
Attribute hash
Example:
my $ tab = xCAT::Table - > new ( 'ppcdirect' ) ;
my $ ent = $ tab - > getNodeSpecAttribs ( $ node , { username = > 'HMC' } , qw/password/ ) ;
Comments:
The keys of the specified options can be given in the list of attributes or not ,
this routine will deal with them .
= cut
#--------------------------------------------------------------------------
2012-03-27 11:21:58 +00:00
#sub getNodeSpecAttribs {
# my $self = shift;
# my $node = shift;
# my %options = ();
# my @attribs = ();
# my @keys = ();
# if (ref $_[0] eq 'HASH') {
# %options = %{shift()};
# @attribs = @_;
# foreach my $key (keys %options) {
# if (!grep(/^$key$/, @attribs)) {
# push @attribs, $key;
# }
# }
# } else {
# @attribs = @_;
# }
# if ((keys (%options)) == 0) {
# my $ent = $self->getNodeAttribs($node, \@attribs);
# return $ent;
# } else {
# my $nodekey = "node";
# if (defined $xCAT::Schema::tabspec{$self->{tabname}}->{nodecol}) {
# $nodekey = $xCAT::Schema::tabspec{$self->{tabname}}->{nodecol};
# }
# $options{$nodekey} = $node;
# my $ent = $self->getAttribs(\%options, \@attribs);
# if ($ent) {
# return $ent;
# }
# my ($nodeghash) = $self->{nodelist}->getAttribs({node=>$node}, "groups");
# unless(defined($nodeghash) && defined($nodeghash->{groups})) {
# return undef;
# }
# my @nodegroups = split(/,/, $nodeghash->{groups});
# foreach my $group (@nodegroups) {
# $options{$nodekey} = $group;
# my $g_ret = $self->getAttribs(\%options, \@attribs);
# if ($g_ret) {
# return $g_ret;
# }
# }
# }
# return undef;
#}
2007-10-26 22:44:33 +00:00
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
=head3 getNodeAttribs_nosub

    Description: Superseded lookup variant (implemented below as
                 getNodeAttribs_nosub_old); kept for reference.
    Arguments:

    Returns:

    Globals:

    Error:

    Example:

    Comments:
        none

=cut
#--------------------------------------------------------------------------------
2010-07-28 06:17:30 +00:00
sub getNodeAttribs_nosub_old
{
    # Superseded implementation kept for reference: fetches the rows for
    # $node (group fallback happens inside getNodeAttribs_nosub_returnany)
    # and trims each returned row down to the requested attribute list.
    my ($self, $node, $attref, %options) = @_;

    my @rows = $self->getNodeAttribs_nosub_returnany($node, $attref, %options);

    # The node column is normally 'node', but a table's schema may override it.
    my $nodekey = "node";
    if (defined $xCAT::Schema::tabspec{ $self->{tabname} }->{nodecol}) {
        $nodekey = $xCAT::Schema::tabspec{ $self->{tabname} }->{nodecol};
    }

    my @trimmed;
    my $found_any = 0;
    foreach my $row (@rows) {
        my $slim = {};
        foreach my $attrib (@$attref) {
            next unless $row and defined($row->{$attrib});
            $found_any = 1;
            $slim->{$attrib} = $row->{$attrib};

            # When attribution is requested, record which group supplied
            # this value (the source group the lower layer annotated).
            if ($options{withattribution} and $attrib ne $nodekey) {
                $slim->{'!!xcatgroupattribution!!'}->{$attrib} =
                    $row->{'!!xcatsourcegroup!!'};
            }
        }

        # An entry is pushed for every row, even if no attribute matched,
        # to keep row positions aligned with the lower layer's results.
        push(@trimmed, $slim);
    }

    if ($found_any) {
        return wantarray ? @trimmed : $trimmed[0];
    }
    return undef;
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
=head3 getNodeAttribs_nosub_returnany

    Description: not used, kept for reference (implemented below as
                 getNodeAttribs_nosub_returnany_old)
    Arguments:

    Returns:

    Globals:

    Error:

    Example:

    Comments:
        none

=cut
#--------------------------------------------------------------------------------
2010-07-23 15:47:21 +00:00
sub getNodeAttribs_nosub_returnany_old
{    #This is the original function, kept for reference and no longer called.
     # Looks the node up directly in this table; if no row exists, walks the
     # node's groups (from the nodelist table) in order and returns the rows
     # of the first group that has any, rewriting the node column so the
     # results appear to belong to the node itself.
    my $self    = shift;
    my $node    = shift;
    my @attribs = @{ shift() };
    my %options = @_;
    my @results;

    # The node column is normally 'node', but a table's schema may override it.
    my $nodekey = "node";
    if (defined $xCAT::Schema::tabspec{ $self->{tabname} }->{nodecol}) {
        $nodekey = $xCAT::Schema::tabspec{ $self->{tabname} }->{nodecol};
    }
    @results = $self->getAttribs({ $nodekey => $node }, @attribs);
    my $data = $results[0];
    if (!defined($data))
    {
        # No direct row for the node; fall back to its group memberships.
        my ($nodeghash) =
          $self->{nodelist}->getAttribs({ node => $node }, 'groups');
        unless (defined($nodeghash) && defined($nodeghash->{groups}))
        {
            return undef;
        }
        my @nodegroups = split(/,/, $nodeghash->{groups});
        foreach my $group (@nodegroups)
        {
            @results = $self->getAttribs({ $nodekey => $group }, @attribs);
            $data = $results[0];

            # BUGFIX: was 'if ($data != undef)' — a numeric comparison that
            # warns under 'use warnings' and compares a reference's address
            # against 0; defined() is the correct presence test.
            if (defined($data))
            {
                foreach (@results) {
                    # Present group-sourced rows as belonging to the node.
                    if ($_->{$nodekey}) { $_->{$nodekey} = $node; }
                    if ($options{withattribution}) { $_->{'!!xcatgroupattribution!!'} = $group; }
                }
                return @results;
            }
        }
    }
    else
    {
        # Direct node rows found; node column is already correct.
        return @results;
    }
    return undef;    #Made it here, config has no good answer
}
2010-08-13 14:40:26 +00:00
# Markers for the '+=NEXTRECORD' continuation convention: a value ending in
# '+=NEXTRECORD' asks that the next matching record's value be appended.
my $nextRecordAtEnd = qr/\+=NEXTRECORD$/;    # marker at end of value only
my $nextRecord      = qr/\+=NEXTRECORD/;     # marker anywhere in value
# NOTE(review): the paragraph below is a stray version-control commit message
# that was pasted into the source by an export artifact; it is preserved here
# as a comment so the file stays parseable.
#
# Cross your fingers that this makes the million and a half dependent things
# happy. Changed the output of getNodeAttributes to merge the data from its
# different groups and to clone entries from one group for each entry in a
# following group with data in it.
# I'm just gonna paste the description Jarrod sent to the group via email:
# So if getAttribs ever returns more than one result, then getNodeAttribs will
# have its output multiplied by that factor. Let's say 'n1' is in groups
# 'g1,g2,g3', and that we request columns 'c1,c2,c3,c4'. Let's also assume the
# primary key is not simply 'node', allowing the node column to contain
# duplicates (as in the switch table):
#   node |c1  |c2   |c3 |c4
#   n1   |v1  |     |   |
#   g2   |    |V2+= |   |
#   g2   |    |v3   |v4 |
#   g3   |    |v5   |   |v6
#   g3   |    |v7   |v8 |v9
# First, we get n1's record:
#   [ { node => 'n1', c1 => 'v1' } ]
# We see that c2-c4 are still unsatisfied, then we check g1, see there is no
# record, so no action takes place; then getAttribs node=>'g2' returns two
# records. As a result, we clone our results so far and independently
# populate them:
#   [ { node => 'n1', c1 => 'v1', c2 => 'v2+=' },
#     { node => 'n1', c1 => 'v1', c2 => 'v3', c3 => 'v4' } ]
# Now, in the first record, we are still looking to fill in c3, c4, and we are
# also still looking at c2 to complete the '+=' operation. In the second
# record, we only need c4 now. Then we hit g3: we also get two results back,
# so since both records are still looking to be completed, the two become 4
# records:
#   [ { node => 'n1', c1 => 'v1', c2 => 'v2v5', c4 => 'v6' },
#     { node => 'n1', c1 => 'v1', c2 => 'v3', c3 => 'v4', c4 => 'v6' },
#     { node => 'n1', c1 => 'v1', c2 => 'v2v7', c3 => 'v8', c4 => 'v9' },
#     { node => 'n1', c1 => 'v1', c2 => 'v3', c3 => 'v4', c4 => 'v9' } ]
# And that would be what getNodesAttribs would return in this case.
# git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@7131 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
#this evolved a bit and i intend to rewrite it into something a bit cleaner at some point - cjhardee
#looks for all of the requested attributes, looking into the groups of the node if needed
2010-07-23 15:47:21 +00:00
sub getNodeAttribs_nosub_returnany
{
2010-08-13 14:40:26 +00:00
my $ self = shift ;
my $ node = shift ;
my @ attribs = @ { shift ( ) } ;
my % options = @ _ ;
my @ results ;
my $ nodekey = "node" ;
if ( defined $ xCAT:: Schema:: tabspec { $ self - > { tabname } } - > { nodecol } ) {
$ nodekey = $ xCAT:: Schema:: tabspec { $ self - > { tabname } } - > { nodecol }
} ;
@ results = $ self - > getAttribs ( { $ nodekey = > $ node } , @ attribs ) ;
2010-07-23 15:47:21 +00:00
2010-08-13 14:40:26 +00:00
my % attribsToDo ;
for ( @ attribs ) {
$ attribsToDo { $ _ } = 0
} ;
2010-07-23 15:47:21 +00:00
2010-08-13 14:40:26 +00:00
my $ attrib ;
my $ result ;
2010-07-23 15:47:21 +00:00
2010-08-13 14:40:26 +00:00
my $ data = $ results [ 0 ] ;
if ( defined { $ data } ) { #if there was some data for the node, loop through and check it
foreach $ result ( @ results ) {
foreach $ attrib ( keys % attribsToDo ) {
#check each item in the results to see which attributes were satisfied
if ( defined ( $ result ) && defined ( $ result - > { $ attrib } ) && $ result - > { $ attrib } !~ $ nextRecordAtEnd ) {
delete $ attribsToDo { $ attrib } ;
}
}
2010-07-28 06:17:30 +00:00
}
2010-08-13 14:40:26 +00:00
}
Cross your fingers that this makes the million and a half dependant things happy. Changed the output of getNodeAttributes to merge the data from its different groups and to clone entries from one group for each entry in a folowing group with data in it.
I'm just gonna paste the description Jarrod sent to the group via email:
So if getAttribs ever returns more than one result, then getNodeAttribs will have it's output multiplied by that factor. I'll draw out a table with changes Chris started adding while he was also trying to flatten the recursion. Let's say 'n1' is in groups 'g1,g2,g3', and that we request columns 'c1,c2,c3,c4' Let's also assume the primary key is not simply 'node', allowing the node column to contain duplicates (as in switch table). I don't expect anyone to actually construct something this convoluted in practice, but:
node |c1 |c2 |c3 |c4
n1 |v1 | | |
g2 | |V2+= | |
g2 | |v3 |v4 |
g3 | |v5 | |v6
g3 | |v7 |v8 |v9
First, we get n1s record:
[
{ node => 'n1', c1 => 'v1',}
]
We see that c2-c4 are still unsatisfiad, then we check g1, see there is no record, so no action takes place, then getAttribs node=>'g2' returns two records. As a result, we clone our results so far and independently populate them:
[
{ node => 'n1', c1=> 'v1', c2 => 'v2+=' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4'},
]
Now, in the first record, we are still looking to fill in c3, c4, and we are also still looking at c2 to complete the '+=' operation.
In the second record, we only need c4 now, no other columns will be checked for that record.
Then we hit g3. We also get two results back (two was easier, but arbitrarily many are possible, btw). Since both records are still looking to be completed, the two become 4 records (if one of the two records were satisfied before this point, the two would have become three instead):
[
{ node => 'n1', c1=> 'v1', c2 => 'v2v5',c4=>'v6' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v6'},
{ node => 'n1', c1=> 'v1', c2 => 'v2v7',c3=>v8,c4=>'v9' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v9'},
]
And that would be what getNodesAttribs would return in this case.
git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@7131 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
2010-08-18 18:14:48 +00:00
2010-08-13 14:40:26 +00:00
if ( ( keys ( % attribsToDo ) ) == 0 ) { #if all of the attributes are satisfied, don't look at the groups
return @ results ;
}
2010-07-28 06:17:30 +00:00
2010-08-13 14:40:26 +00:00
#find the groups for this node
my ( $ nodeghash ) = $ self - > { nodelist } - > getAttribs ( { node = > $ node } , 'groups' ) ;
2010-07-23 15:47:21 +00:00
2010-08-13 14:40:26 +00:00
#no groups for the node, we are done
unless ( defined ( $ nodeghash ) && defined ( $ nodeghash - > { groups } ) ) {
return @ results ;
}
2010-07-23 15:47:21 +00:00
2010-08-13 14:40:26 +00:00
my @ nodegroups = split ( /,/ , $ nodeghash - > { groups } ) ;
my $ group ;
my @ groupResults ;
my $ groupResult ;
my % attribsDone ;
#print "After node results, still missing ".Dumper(\%attribsToDo)."\n";
#print "groups are ".Dumper(\@nodegroups);
foreach $ group ( @ nodegroups ) {
Cross your fingers that this makes the million and a half dependant things happy. Changed the output of getNodeAttributes to merge the data from its different groups and to clone entries from one group for each entry in a folowing group with data in it.
I'm just gonna paste the description Jarrod sent to the group via email:
So if getAttribs ever returns more than one result, then getNodeAttribs will have it's output multiplied by that factor. I'll draw out a table with changes Chris started adding while he was also trying to flatten the recursion. Let's say 'n1' is in groups 'g1,g2,g3', and that we request columns 'c1,c2,c3,c4' Let's also assume the primary key is not simply 'node', allowing the node column to contain duplicates (as in switch table). I don't expect anyone to actually construct something this convoluted in practice, but:
node |c1 |c2 |c3 |c4
n1 |v1 | | |
g2 | |V2+= | |
g2 | |v3 |v4 |
g3 | |v5 | |v6
g3 | |v7 |v8 |v9
First, we get n1s record:
[
{ node => 'n1', c1 => 'v1',}
]
We see that c2-c4 are still unsatisfiad, then we check g1, see there is no record, so no action takes place, then getAttribs node=>'g2' returns two records. As a result, we clone our results so far and independently populate them:
[
{ node => 'n1', c1=> 'v1', c2 => 'v2+=' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4'},
]
Now, in the first record, we are still looking to fill in c3, c4, and we are also still looking at c2 to complete the '+=' operation.
In the second record, we only need c4 now, no other columns will be checked for that record.
Then we hit g3. We also get two results back (two was easier, but arbitrarily many are possible, btw). Since both records are still looking to be completed, the two become 4 records (if one of the two records were satisfied before this point, the two would have become three instead):
[
{ node => 'n1', c1=> 'v1', c2 => 'v2v5',c4=>'v6' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v6'},
{ node => 'n1', c1=> 'v1', c2 => 'v2v7',c3=>v8,c4=>'v9' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v9'},
]
And that would be what getNodesAttribs would return in this case.
git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@7131 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
2010-08-18 18:14:48 +00:00
use Storable qw( dclone ) ;
my @ prevResCopy = @ { dclone ( \ @ results ) } ;
my @ expandedResults ;
2010-08-13 14:40:26 +00:00
@ groupResults = $ self - > getAttribs ( { $ nodekey = > $ group } , keys ( % attribsToDo ) ) ;
#print "group results for $group are ".Dumper(\@groupResults)."\n";
$ data = $ groupResults [ 0 ] ;
if ( defined ( $ data ) ) { #if some attributes came back from the query for this group
Cross your fingers that this makes the million and a half dependant things happy. Changed the output of getNodeAttributes to merge the data from its different groups and to clone entries from one group for each entry in a folowing group with data in it.
I'm just gonna paste the description Jarrod sent to the group via email:
So if getAttribs ever returns more than one result, then getNodeAttribs will have it's output multiplied by that factor. I'll draw out a table with changes Chris started adding while he was also trying to flatten the recursion. Let's say 'n1' is in groups 'g1,g2,g3', and that we request columns 'c1,c2,c3,c4' Let's also assume the primary key is not simply 'node', allowing the node column to contain duplicates (as in switch table). I don't expect anyone to actually construct something this convoluted in practice, but:
node |c1 |c2 |c3 |c4
n1 |v1 | | |
g2 | |V2+= | |
g2 | |v3 |v4 |
g3 | |v5 | |v6
g3 | |v7 |v8 |v9
First, we get n1s record:
[
{ node => 'n1', c1 => 'v1',}
]
We see that c2-c4 are still unsatisfiad, then we check g1, see there is no record, so no action takes place, then getAttribs node=>'g2' returns two records. As a result, we clone our results so far and independently populate them:
[
{ node => 'n1', c1=> 'v1', c2 => 'v2+=' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4'},
]
Now, in the first record, we are still looking to fill in c3, c4, and we are also still looking at c2 to complete the '+=' operation.
In the second record, we only need c4 now, no other columns will be checked for that record.
Then we hit g3. We also get two results back (two was easier, but arbitrarily many are possible, btw). Since both records are still looking to be completed, the two become 4 records (if one of the two records were satisfied before this point, the two would have become three instead):
[
{ node => 'n1', c1=> 'v1', c2 => 'v2v5',c4=>'v6' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v6'},
{ node => 'n1', c1=> 'v1', c2 => 'v2v7',c3=>v8,c4=>'v9' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v9'},
]
And that would be what getNodesAttribs would return in this case.
git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@7131 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
2010-08-18 18:14:48 +00:00
2010-08-13 14:40:26 +00:00
foreach $ groupResult ( @ groupResults ) {
my % toPush ;
foreach $ attrib ( keys % attribsToDo ) { #check each unfinished attribute against the results for this group
#print "looking for attrib $attrib\n";
if ( defined ( $ groupResult - > { $ attrib } ) ) {
Cross your fingers that this makes the million and a half dependant things happy. Changed the output of getNodeAttributes to merge the data from its different groups and to clone entries from one group for each entry in a folowing group with data in it.
I'm just gonna paste the description Jarrod sent to the group via email:
So if getAttribs ever returns more than one result, then getNodeAttribs will have it's output multiplied by that factor. I'll draw out a table with changes Chris started adding while he was also trying to flatten the recursion. Let's say 'n1' is in groups 'g1,g2,g3', and that we request columns 'c1,c2,c3,c4' Let's also assume the primary key is not simply 'node', allowing the node column to contain duplicates (as in switch table). I don't expect anyone to actually construct something this convoluted in practice, but:
node |c1 |c2 |c3 |c4
n1 |v1 | | |
g2 | |V2+= | |
g2 | |v3 |v4 |
g3 | |v5 | |v6
g3 | |v7 |v8 |v9
First, we get n1s record:
[
{ node => 'n1', c1 => 'v1',}
]
We see that c2-c4 are still unsatisfiad, then we check g1, see there is no record, so no action takes place, then getAttribs node=>'g2' returns two records. As a result, we clone our results so far and independently populate them:
[
{ node => 'n1', c1=> 'v1', c2 => 'v2+=' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4'},
]
Now, in the first record, we are still looking to fill in c3, c4, and we are also still looking at c2 to complete the '+=' operation.
In the second record, we only need c4 now, no other columns will be checked for that record.
Then we hit g3. We also get two results back (two was easier, but arbitrarily many are possible, btw). Since both records are still looking to be completed, the two become 4 records (if one of the two records were satisfied before this point, the two would have become three instead):
[
{ node => 'n1', c1=> 'v1', c2 => 'v2v5',c4=>'v6' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v6'},
{ node => 'n1', c1=> 'v1', c2 => 'v2v7',c3=>v8,c4=>'v9' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v9'},
]
And that would be what getNodesAttribs would return in this case.
git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@7131 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
2010-08-18 18:14:48 +00:00
$ attribsDone { $ attrib } = 0 ;
#print "found attArib $attrib = $groupResult->{$attrib}\n";
#print "and results look like this: \n".Dumper(\@results)."\n\n\n";
2010-08-13 14:40:26 +00:00
foreach $ result ( @ results ) { #loop through our existing results to add or modify the value for this attribute
Cross your fingers that this makes the million and a half dependant things happy. Changed the output of getNodeAttributes to merge the data from its different groups and to clone entries from one group for each entry in a folowing group with data in it.
I'm just gonna paste the description Jarrod sent to the group via email:
So if getAttribs ever returns more than one result, then getNodeAttribs will have it's output multiplied by that factor. I'll draw out a table with changes Chris started adding while he was also trying to flatten the recursion. Let's say 'n1' is in groups 'g1,g2,g3', and that we request columns 'c1,c2,c3,c4' Let's also assume the primary key is not simply 'node', allowing the node column to contain duplicates (as in switch table). I don't expect anyone to actually construct something this convoluted in practice, but:
node |c1 |c2 |c3 |c4
n1 |v1 | | |
g2 | |V2+= | |
g2 | |v3 |v4 |
g3 | |v5 | |v6
g3 | |v7 |v8 |v9
First, we get n1s record:
[
{ node => 'n1', c1 => 'v1',}
]
We see that c2-c4 are still unsatisfiad, then we check g1, see there is no record, so no action takes place, then getAttribs node=>'g2' returns two records. As a result, we clone our results so far and independently populate them:
[
{ node => 'n1', c1=> 'v1', c2 => 'v2+=' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4'},
]
Now, in the first record, we are still looking to fill in c3, c4, and we are also still looking at c2 to complete the '+=' operation.
In the second record, we only need c4 now, no other columns will be checked for that record.
Then we hit g3. We also get two results back (two was easier, but arbitrarily many are possible, btw). Since both records are still looking to be completed, the two become 4 records (if one of the two records were satisfied before this point, the two would have become three instead):
[
{ node => 'n1', c1=> 'v1', c2 => 'v2v5',c4=>'v6' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v6'},
{ node => 'n1', c1=> 'v1', c2 => 'v2v7',c3=>v8,c4=>'v9' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v9'},
]
And that would be what getNodesAttribs would return in this case.
git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@7131 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
2010-08-18 18:14:48 +00:00
if ( defined ( $ result ) ) {
if ( defined ( $ result - > { $ attrib } ) ) {
if ( $ result - > { $ attrib } =~ $ nextRecordAtEnd ) { #if the attribute value should be added
$ result - > { $ attrib } =~ s/$nextRecordAtEnd// ; #pull out the existing next record string
$ result - > { $ attrib } . = $ groupResult - > { $ attrib } ; #add the group result onto the end of the existing value
if ( $ groupResult - > { $ attrib } =~ $ nextRecordAtEnd && defined ( $ attribsDone { $ attrib } ) ) {
delete $ attribsDone { $ attrib } ;
2010-08-13 14:40:26 +00:00
}
Cross your fingers that this makes the million and a half dependant things happy. Changed the output of getNodeAttributes to merge the data from its different groups and to clone entries from one group for each entry in a folowing group with data in it.
I'm just gonna paste the description Jarrod sent to the group via email:
So if getAttribs ever returns more than one result, then getNodeAttribs will have it's output multiplied by that factor. I'll draw out a table with changes Chris started adding while he was also trying to flatten the recursion. Let's say 'n1' is in groups 'g1,g2,g3', and that we request columns 'c1,c2,c3,c4' Let's also assume the primary key is not simply 'node', allowing the node column to contain duplicates (as in switch table). I don't expect anyone to actually construct something this convoluted in practice, but:
node |c1 |c2 |c3 |c4
n1 |v1 | | |
g2 | |V2+= | |
g2 | |v3 |v4 |
g3 | |v5 | |v6
g3 | |v7 |v8 |v9
First, we get n1s record:
[
{ node => 'n1', c1 => 'v1',}
]
We see that c2-c4 are still unsatisfiad, then we check g1, see there is no record, so no action takes place, then getAttribs node=>'g2' returns two records. As a result, we clone our results so far and independently populate them:
[
{ node => 'n1', c1=> 'v1', c2 => 'v2+=' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4'},
]
Now, in the first record, we are still looking to fill in c3, c4, and we are also still looking at c2 to complete the '+=' operation.
In the second record, we only need c4 now, no other columns will be checked for that record.
Then we hit g3. We also get two results back (two was easier, but arbitrarily many are possible, btw). Since both records are still looking to be completed, the two become 4 records (if one of the two records were satisfied before this point, the two would have become three instead):
[
{ node => 'n1', c1=> 'v1', c2 => 'v2v5',c4=>'v6' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v6'},
{ node => 'n1', c1=> 'v1', c2 => 'v2v7',c3=>v8,c4=>'v9' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v9'},
]
And that would be what getNodesAttribs would return in this case.
git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@7131 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
2010-08-18 18:14:48 +00:00
if ( $ options { withattribution } && $ attrib ne $ nodekey ) {
if ( defined ( $ result - > { '!!xcatgroupattribution!!' } ) ) {
if ( defined ( $ result - > { '!!xcatgroupattribution!!' } - > { $ attrib } ) ) {
$ result - > { '!!xcatgroupattribution!!' } - > { $ attrib } . = "," . $ group ;
}
else {
$ result - > { '!!xcatgroupattribution!!' } - > { $ attrib } = $ node . "," . $ group ;
}
}
else {
$ result - > { '!!xcatgroupattribution!!' } - > { $ attrib } = $ node . "," . $ group ;
}
2010-08-13 14:40:26 +00:00
}
}
Cross your fingers that this makes the million and a half dependant things happy. Changed the output of getNodeAttributes to merge the data from its different groups and to clone entries from one group for each entry in a folowing group with data in it.
I'm just gonna paste the description Jarrod sent to the group via email:
So if getAttribs ever returns more than one result, then getNodeAttribs will have it's output multiplied by that factor. I'll draw out a table with changes Chris started adding while he was also trying to flatten the recursion. Let's say 'n1' is in groups 'g1,g2,g3', and that we request columns 'c1,c2,c3,c4' Let's also assume the primary key is not simply 'node', allowing the node column to contain duplicates (as in switch table). I don't expect anyone to actually construct something this convoluted in practice, but:
node |c1 |c2 |c3 |c4
n1 |v1 | | |
g2 | |V2+= | |
g2 | |v3 |v4 |
g3 | |v5 | |v6
g3 | |v7 |v8 |v9
First, we get n1s record:
[
{ node => 'n1', c1 => 'v1',}
]
We see that c2-c4 are still unsatisfiad, then we check g1, see there is no record, so no action takes place, then getAttribs node=>'g2' returns two records. As a result, we clone our results so far and independently populate them:
[
{ node => 'n1', c1=> 'v1', c2 => 'v2+=' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4'},
]
Now, in the first record, we are still looking to fill in c3, c4, and we are also still looking at c2 to complete the '+=' operation.
In the second record, we only need c4 now, no other columns will be checked for that record.
Then we hit g3. We also get two results back (two was easier, but arbitrarily many are possible, btw). Since both records are still looking to be completed, the two become 4 records (if one of the two records were satisfied before this point, the two would have become three instead):
[
{ node => 'n1', c1=> 'v1', c2 => 'v2v5',c4=>'v6' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v6'},
{ node => 'n1', c1=> 'v1', c2 => 'v2v7',c3=>v8,c4=>'v9' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v9'},
]
And that would be what getNodesAttribs would return in this case.
git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@7131 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
2010-08-18 18:14:48 +00:00
}
else { #attribute did not already have an entry
#print "attrib $attrib was added with value $groupResult->{$attrib}\n";
$ result - > { $ attrib } = $ groupResult - > { $ attrib } ;
if ( $ options { withattribution } && $ attrib ne $ nodekey ) {
$ result - > { '!!xcatgroupattribution!!' } - > { $ attrib } = $ group ;
}
if ( $ groupResult - > { $ attrib } =~ $ nextRecordAtEnd && defined ( $ attribsDone { $ attrib } ) ) {
delete $ attribsDone { $ attrib } ;
2010-08-13 14:40:26 +00:00
}
2010-07-23 15:47:21 +00:00
}
2010-08-13 14:40:26 +00:00
}
Cross your fingers that this makes the million and a half dependant things happy. Changed the output of getNodeAttributes to merge the data from its different groups and to clone entries from one group for each entry in a folowing group with data in it.
I'm just gonna paste the description Jarrod sent to the group via email:
So if getAttribs ever returns more than one result, then getNodeAttribs will have it's output multiplied by that factor. I'll draw out a table with changes Chris started adding while he was also trying to flatten the recursion. Let's say 'n1' is in groups 'g1,g2,g3', and that we request columns 'c1,c2,c3,c4' Let's also assume the primary key is not simply 'node', allowing the node column to contain duplicates (as in switch table). I don't expect anyone to actually construct something this convoluted in practice, but:
node |c1 |c2 |c3 |c4
n1 |v1 | | |
g2 | |V2+= | |
g2 | |v3 |v4 |
g3 | |v5 | |v6
g3 | |v7 |v8 |v9
First, we get n1s record:
[
{ node => 'n1', c1 => 'v1',}
]
We see that c2-c4 are still unsatisfiad, then we check g1, see there is no record, so no action takes place, then getAttribs node=>'g2' returns two records. As a result, we clone our results so far and independently populate them:
[
{ node => 'n1', c1=> 'v1', c2 => 'v2+=' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4'},
]
Now, in the first record, we are still looking to fill in c3, c4, and we are also still looking at c2 to complete the '+=' operation.
In the second record, we only need c4 now, no other columns will be checked for that record.
Then we hit g3. We also get two results back (two was easier, but arbitrarily many are possible, btw). Since both records are still looking to be completed, the two become 4 records (if one of the two records were satisfied before this point, the two would have become three instead):
[
{ node => 'n1', c1=> 'v1', c2 => 'v2v5',c4=>'v6' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v6'},
{ node => 'n1', c1=> 'v1', c2 => 'v2v7',c3=>v8,c4=>'v9' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v9'},
]
And that would be what getNodesAttribs would return in this case.
git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@7131 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
2010-08-18 18:14:48 +00:00
else { #no results in the array so far
#print "pushing for the first time. attr=$attrib groupResults=$groupResult->{$attrib}\n";
$ toPush { $ attrib } = $ groupResult - > { $ attrib } ;
if ( $ options { withattribution } && $ attrib ne $ nodekey ) {
$ toPush { '!!xcatgroupattribution!!' } - > { $ attrib } = $ group ;
}
if ( $ groupResult - > { $ nodekey } ) {
$ toPush { $ nodekey } = $ node ;
}
if ( $ groupResult - > { $ attrib } =~ $ nextRecordAtEnd && defined ( $ attribsDone { $ attrib } ) ) {
delete $ attribsDone { $ attrib } ;
}
2010-08-13 14:40:26 +00:00
}
2010-07-23 15:47:21 +00:00
}
2010-08-13 14:40:26 +00:00
}
2010-07-23 15:47:21 +00:00
}
Cross your fingers that this makes the million and a half dependent things happy. Changed the output of getNodeAttributes to merge the data from its different groups and to clone entries from one group for each entry in a following group with data in it.
I'm just gonna paste the description Jarrod sent to the group via email:
So if getAttribs ever returns more than one result, then getNodeAttribs will have it's output multiplied by that factor. I'll draw out a table with changes Chris started adding while he was also trying to flatten the recursion. Let's say 'n1' is in groups 'g1,g2,g3', and that we request columns 'c1,c2,c3,c4' Let's also assume the primary key is not simply 'node', allowing the node column to contain duplicates (as in switch table). I don't expect anyone to actually construct something this convoluted in practice, but:
node |c1 |c2 |c3 |c4
n1 |v1 | | |
g2 | |V2+= | |
g2 | |v3 |v4 |
g3 | |v5 | |v6
g3 | |v7 |v8 |v9
First, we get n1s record:
[
{ node => 'n1', c1 => 'v1',}
]
We see that c2-c4 are still unsatisfiad, then we check g1, see there is no record, so no action takes place, then getAttribs node=>'g2' returns two records. As a result, we clone our results so far and independently populate them:
[
{ node => 'n1', c1=> 'v1', c2 => 'v2+=' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4'},
]
Now, in the first record, we are still looking to fill in c3, c4, and we are also still looking at c2 to complete the '+=' operation.
In the second record, we only need c4 now, no other columns will be checked for that record.
Then we hit g3. We also get two results back (two was easier, but arbitrarily many are possible, btw). Since both records are still looking to be completed, the two become 4 records (if one of the two records were satisfied before this point, the two would have become three instead):
[
{ node => 'n1', c1=> 'v1', c2 => 'v2v5',c4=>'v6' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v6'},
{ node => 'n1', c1=> 'v1', c2 => 'v2v7',c3=>v8,c4=>'v9' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v9'},
]
And that would be what getNodesAttribs would return in this case.
git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@7131 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
2010-08-18 18:14:48 +00:00
if ( keys ( % toPush ) > 0 ) {
2010-08-13 14:40:26 +00:00
#print "pushing ".Dumper(\%toPush)."\n";
Cross your fingers that this makes the million and a half dependant things happy. Changed the output of getNodeAttributes to merge the data from its different groups and to clone entries from one group for each entry in a folowing group with data in it.
I'm just gonna paste the description Jarrod sent to the group via email:
So if getAttribs ever returns more than one result, then getNodeAttribs will have it's output multiplied by that factor. I'll draw out a table with changes Chris started adding while he was also trying to flatten the recursion. Let's say 'n1' is in groups 'g1,g2,g3', and that we request columns 'c1,c2,c3,c4' Let's also assume the primary key is not simply 'node', allowing the node column to contain duplicates (as in switch table). I don't expect anyone to actually construct something this convoluted in practice, but:
node |c1 |c2 |c3 |c4
n1 |v1 | | |
g2 | |V2+= | |
g2 | |v3 |v4 |
g3 | |v5 | |v6
g3 | |v7 |v8 |v9
First, we get n1s record:
[
{ node => 'n1', c1 => 'v1',}
]
We see that c2-c4 are still unsatisfiad, then we check g1, see there is no record, so no action takes place, then getAttribs node=>'g2' returns two records. As a result, we clone our results so far and independently populate them:
[
{ node => 'n1', c1=> 'v1', c2 => 'v2+=' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4'},
]
Now, in the first record, we are still looking to fill in c3, c4, and we are also still looking at c2 to complete the '+=' operation.
In the second record, we only need c4 now, no other columns will be checked for that record.
Then we hit g3. We also get two results back (two was easier, but arbitrarily many are possible, btw). Since both records are still looking to be completed, the two become 4 records (if one of the two records were satisfied before this point, the two would have become three instead):
[
{ node => 'n1', c1=> 'v1', c2 => 'v2v5',c4=>'v6' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v6'},
{ node => 'n1', c1=> 'v1', c2 => 'v2v7',c3=>v8,c4=>'v9' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v9'},
]
And that would be what getNodesAttribs would return in this case.
git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@7131 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
2010-08-18 18:14:48 +00:00
if ( ! defined ( $ results [ 0 ] ) ) {
shift ( @ results ) ;
}
2010-08-13 14:40:26 +00:00
push ( @ results , \ % toPush ) ;
}
Cross your fingers that this makes the million and a half dependant things happy. Changed the output of getNodeAttributes to merge the data from its different groups and to clone entries from one group for each entry in a folowing group with data in it.
I'm just gonna paste the description Jarrod sent to the group via email:
So if getAttribs ever returns more than one result, then getNodeAttribs will have it's output multiplied by that factor. I'll draw out a table with changes Chris started adding while he was also trying to flatten the recursion. Let's say 'n1' is in groups 'g1,g2,g3', and that we request columns 'c1,c2,c3,c4' Let's also assume the primary key is not simply 'node', allowing the node column to contain duplicates (as in switch table). I don't expect anyone to actually construct something this convoluted in practice, but:
node |c1 |c2 |c3 |c4
n1 |v1 | | |
g2 | |V2+= | |
g2 | |v3 |v4 |
g3 | |v5 | |v6
g3 | |v7 |v8 |v9
First, we get n1s record:
[
{ node => 'n1', c1 => 'v1',}
]
We see that c2-c4 are still unsatisfiad, then we check g1, see there is no record, so no action takes place, then getAttribs node=>'g2' returns two records. As a result, we clone our results so far and independently populate them:
[
{ node => 'n1', c1=> 'v1', c2 => 'v2+=' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4'},
]
Now, in the first record, we are still looking to fill in c3, c4, and we are also still looking at c2 to complete the '+=' operation.
In the second record, we only need c4 now, no other columns will be checked for that record.
Then we hit g3. We also get two results back (two was easier, but arbitrarily many are possible, btw). Since both records are still looking to be completed, the two become 4 records (if one of the two records were satisfied before this point, the two would have become three instead):
[
{ node => 'n1', c1=> 'v1', c2 => 'v2v5',c4=>'v6' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v6'},
{ node => 'n1', c1=> 'v1', c2 => 'v2v7',c3=>v8,c4=>'v9' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v9'},
]
And that would be what getNodesAttribs would return in this case.
git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@7131 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
2010-08-18 18:14:48 +00:00
#print "pushing results into expanded results\n";
#print "results= ".Dumper(\@results)."\n";
push ( @ expandedResults , @ results ) ;
#print "expandedResults= ".Dumper(\@expandedResults)."\n";
#print "setting results to previous:\n".Dumper(\@prevResCopy)."\n\n\n";
@ results = @ { dclone ( \ @ prevResCopy ) } ;
2010-08-13 14:40:26 +00:00
}
Cross your fingers that this makes the million and a half dependant things happy. Changed the output of getNodeAttributes to merge the data from its different groups and to clone entries from one group for each entry in a folowing group with data in it.
I'm just gonna paste the description Jarrod sent to the group via email:
So if getAttribs ever returns more than one result, then getNodeAttribs will have it's output multiplied by that factor. I'll draw out a table with changes Chris started adding while he was also trying to flatten the recursion. Let's say 'n1' is in groups 'g1,g2,g3', and that we request columns 'c1,c2,c3,c4' Let's also assume the primary key is not simply 'node', allowing the node column to contain duplicates (as in switch table). I don't expect anyone to actually construct something this convoluted in practice, but:
node |c1 |c2 |c3 |c4
n1 |v1 | | |
g2 | |V2+= | |
g2 | |v3 |v4 |
g3 | |v5 | |v6
g3 | |v7 |v8 |v9
First, we get n1s record:
[
{ node => 'n1', c1 => 'v1',}
]
We see that c2-c4 are still unsatisfiad, then we check g1, see there is no record, so no action takes place, then getAttribs node=>'g2' returns two records. As a result, we clone our results so far and independently populate them:
[
{ node => 'n1', c1=> 'v1', c2 => 'v2+=' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4'},
]
Now, in the first record, we are still looking to fill in c3, c4, and we are also still looking at c2 to complete the '+=' operation.
In the second record, we only need c4 now, no other columns will be checked for that record.
Then we hit g3. We also get two results back (two was easier, but arbitrarily many are possible, btw). Since both records are still looking to be completed, the two become 4 records (if one of the two records were satisfied before this point, the two would have become three instead):
[
{ node => 'n1', c1=> 'v1', c2 => 'v2v5',c4=>'v6' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v6'},
{ node => 'n1', c1=> 'v1', c2 => 'v2v7',c3=>v8,c4=>'v9' },
{ node => 'n1', c1 => 'v1', c2=> 'v3', c3 => 'v4',c4=>'v9'},
]
And that would be what getNodesAttribs would return in this case.
git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@7131 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
2010-08-18 18:14:48 +00:00
@ results = @ expandedResults ;
2010-08-13 14:40:26 +00:00
foreach $ attrib ( keys % attribsDone ) {
if ( defined ( $ attribsToDo { $ attrib } ) ) {
delete $ attribsToDo { $ attrib } ;
2010-07-23 15:47:21 +00:00
}
2010-08-13 14:40:26 +00:00
}
if ( ( keys ( % attribsToDo ) ) == 0 ) { #all of the attributes are satisfied, so stop looking at the groups
last ;
}
2010-07-23 15:47:21 +00:00
}
2010-08-13 14:40:26 +00:00
}
2010-07-23 15:47:21 +00:00
2010-08-13 14:40:26 +00:00
#print "results ".Dumper(\@results);
#run through the results and remove any "+=NEXTRECORD" ocurrances
for $ result ( @ results ) {
for my $ key ( keys %$ result ) {
$ result - > { $ key } =~ s/\+=NEXTRECORD//g ;
2010-07-23 15:47:21 +00:00
}
2010-08-13 14:40:26 +00:00
}
2010-07-28 16:19:51 +00:00
2010-08-13 14:40:26 +00:00
return @ results ;
2010-07-23 15:47:21 +00:00
}
2007-10-26 22:44:33 +00:00
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
=head3 getAllEntries
2007-10-26 22:44:33 +00:00
Description: Read entire table
Arguments:
2008-02-21 21:10:35 +00:00
Table handle
2009-01-14 19:57:39 +00:00
"all" return all lines ( even disabled )
Default is to return only lines that have not been disabled
2007-10-26 22:44:33 +00:00
Returns:
2008-02-21 21:10:35 +00:00
Hash containing all rows in table
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
my $ tabh = xCAT::Table - > new ( $ table ) ;
2009-01-14 19:57:39 +00:00
my $ recs = $ tabh - > getAllEntries ( ) ; # returns entries not disabled
my $ recs = $ tabh - > getAllEntries ( "all" ) ; # returns all entries
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Comments:
none
=cut
#--------------------------------------------------------------------------------
sub getAllEntries
{
    # Read the entire table and return a reference to an array of row
    # hashrefs.  By default, rows whose "disable" column is set are
    # skipped; pass "all" to return every row including disabled ones.
    my $self = shift;

    # Delegate to the db worker process when one is mediating DB access.
    if ($dbworkerpid) {
        return dbc_call($self, 'getAllEntries', @_);
    }
    my $allentries = shift;    # "all" (any true value) => include disabled rows
    my @rets;
    my $query;

    # delimit the disable column based on the DB
    my $disable = &delimitcol("disable");
    if ($allentries) {    # get all lines
        $query = $self->{dbh}->prepare('SELECT * FROM ' . $self->{tabname});
    } else {              # get only enabled lines
        my $qstring = 'SELECT * FROM ' . $self->{tabname} . " WHERE " . $disable . " is NULL or " . $disable . " in ('0','no','NO','No','nO')";
        $query = $self->{dbh}->prepare($qstring);
    }
    $query->execute();
    while (my $data = $query->fetchrow_hashref())
    {
        foreach (keys %$data)
        {
            # Normalize empty values to undef.  Check defined() first so
            # NULL columns do not emit "uninitialized value" warnings when
            # matched against the regex.
            if (!defined($data->{$_}) || $data->{$_} =~ /^$/)
            {
                $data->{$_} = undef;
            }
        }
        push @rets, $data;
    }
    $query->finish();
    return \@rets;
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
=head3 getAllAttribsWhere
2007-10-26 22:44:33 +00:00
Description: Get all attributes with "where" clause
2011-03-22 18:45:45 +00:00
When using a general Where clause with SQL statement then
because we support mulitiple databases ( SQLite , MySQL and DB2 ) that
2010-01-19 19:34:46 +00:00
require different syntax . Any code using this routine , must call the
Utils - > getDBName routine and code the where clause that is appropriate for
each supported database .
2011-03-22 18:45:45 +00:00
When the input is the array of attr <operator> val strings , the routine will
build the correct Where clause for the database we are running .
2007-10-26 22:44:33 +00:00
Arguments:
Database Handle
Where clause
2011-03-22 18:45:45 +00:00
or
array of attr <operator> val strings to be build into a Where clause
2007-10-26 22:44:33 +00:00
Returns:
2008-02-21 21:10:35 +00:00
Array of attributes
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
2011-03-22 18:45:45 +00:00
General Where clause:
2008-02-21 21:10:35 +00:00
$ nodelist - > getAllAttribsWhere ( "groups like '%" . $ atom . "%'" , 'node' , 'group' ) ;
2010-04-09 13:37:51 +00:00
returns node and group attributes
$ nodelist - > getAllAttribsWhere ( "groups like '%" . $ atom . "%'" , 'ALL' ) ;
returns all attributes
2011-03-22 18:45:45 +00:00
Input of attr <operator> val strings
2011-03-23 17:02:04 +00:00
$ nodelist - > getAllAttribsWhere ( array of attr <operator> val , 'node' , 'group' ) ;
2011-03-22 18:45:45 +00:00
returns node and group attributes
2011-03-23 17:02:04 +00:00
$ nodelist - > getAllAttribsWhere ( array of attr <operator> val , 'ALL' ) ;
2011-03-22 18:45:45 +00:00
returns all attributes
2011-03-23 17:02:04 +00:00
where operator can be
( == , != , =~ , !~ , > , < , >= , <= )
2011-03-22 18:45:45 +00:00
2007-10-26 22:44:33 +00:00
Comments:
none
=cut
#--------------------------------------------------------------------------------
sub getAllAttribsWhere
{
    # Return the requested attributes of every enabled row matching a
    # caller-supplied WHERE clause.  The clause may be either a literal
    # SQL string (caller is responsible for per-database syntax) or an
    # array ref of "attr <operator> val" strings that is converted into a
    # portable WHERE clause for the running database.  Pass 'ALL' as the
    # sole attribute to get every column.
    my $self = shift;

    # Delegate to the db worker process when one is mediating DB access.
    if ($dbworkerpid) {
        return dbc_call($self, 'getAllAttribsWhere', @_);
    }
    my $clause = shift;
    my $whereclause;
    my @attribs = @_;
    my @results = ();
    my $query;
    my $query2;

    # An array ref is a list of "attr <op> val" strings we translate into
    # database-appropriate SQL; anything else is used verbatim.
    if (ref($clause) eq 'ARRAY') {
        $whereclause = &buildWhereClause($clause);
    } else {
        $whereclause = $clause;
    }

    # delimit the disable column based on the DB
    my $disable = &delimitcol("disable");
    $query2 = 'SELECT * FROM ' . $self->{tabname} . ' WHERE (' . $whereclause . ") and ($disable is NULL or $disable in ('0','no','NO','No','nO'))";
    $query  = $self->{dbh}->prepare($query2);
    $query->execute();
    while (my $data = $query->fetchrow_hashref())
    {
        my %newrow = ();

        # Guard with defined() so an empty attribute list does not raise
        # an "uninitialized value in string eq" warning.
        if (defined($attribs[0]) && $attribs[0] eq "ALL") {    # want all attributes
            foreach (keys %$data) {
                # normalize empty strings to undef
                if (!defined($data->{$_}) || $data->{$_} =~ /^$/)
                {
                    $data->{$_} = undef;
                }
            }
            push @results, $data;
        } else {    # want specific attributes
            foreach (@attribs)
            {
                # Only copy attributes that are set; defined() is checked
                # first to avoid warnings on NULL columns.
                unless (!defined($data->{$_}) || $data->{$_} =~ /^$/)
                {    #The reason we do this is to undef fields in rows that may still be returned..
                    $newrow{$_} = $data->{$_};
                }
            }
            if (keys %newrow)
            {
                push(@results, \%newrow);
            }
        }
    }
    $query->finish();
    return @results;
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
=head3 getAllNodeAttribs
2007-10-26 22:44:33 +00:00
Description: Get all the node attributes values for the input table on the
attribute list
Arguments:
Table handle
2012-08-28 15:07:28 +00:00
Attribute list
optional hash return style
( changes the return hash structure format )
2007-10-26 22:44:33 +00:00
Returns:
Array of attribute values
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
2012-08-28 15:07:28 +00:00
my @ entries = $ self - > { switchtab } - > getAllNodeAttribs ( [ 'port' , 'switch' ] ) ;
my @ entries = $ self - > { switchtab } - > getAllNodeAttribs ( [ 'port' , 'switch' ] , 1 ) ;
2007-10-26 22:44:33 +00:00
Comments:
none
=cut
#--------------------------------------------------------------------------------
sub getAllNodeAttribs
{
    # Extract every node record from this table, expanding group entries
    # into their member nodes and applying the same inheritance/substitution
    # logic as getNodeAttribs.  Each node is emitted at most once.
    #
    # Arguments:
    #   $attribq      - array ref of attribute names to fetch
    #   $hashretstyle - optional; if true, return a hash ref of
    #                   { node => [ attr hashrefs ] } instead of a flat list
    #   %options      - optional flags (prefetchcache keeps existing caches)
    my $self = shift;

    # Delegate to the db worker process when one is mediating DB access.
    if ($dbworkerpid) {
        return dbc_call($self, 'getAllNodeAttribs', @_);
    }
    my $attribq      = shift;
    my $hashretstyle = shift;
    my %options      = @_;
    my $rethash;
    my @results = ();

    # Track nodes already emitted so the same node is not returned twice.
    my %donenodes;
    my $query;

    # The node column name can be overridden per-table by the schema.
    my $nodekey = "node";
    if (defined $xCAT::Schema::tabspec{ $self->{tabname} }->{nodecol}) {
        $nodekey = $xCAT::Schema::tabspec{ $self->{tabname} }->{nodecol};
    }

    # delimit the disable column based on the DB
    my $disable  = &delimitcol("disable");
    my $dnodekey = &delimitcol($nodekey);
    my $qstring  = 'SELECT ' . $dnodekey . ' FROM '
      . $self->{tabname}
      . " WHERE " . $disable . " is NULL or " . $disable . " in ('0','no','NO','No','nO')";
    $query = $self->{dbh}->prepare($qstring);
    $query->execute();

    xCAT::NodeRange::retain_cache(1);
    unless ($options{prefetchcache}) {
        $self->{_use_cache}             = 0;
        $self->{nodelist}->{_use_cache} = 0;
    }
    $self->_build_cache($attribq);
    $self->{nodelist}->_build_cache([ 'node', 'groups' ]);
    $self->{_use_cache}             = 1;
    $self->{nodelist}->{_use_cache} = 1;
    while (my $data = $query->fetchrow_hashref())
    {
        unless ($data->{$nodekey} =~ /^$/ || !defined($data->{$nodekey}))
        {    # ignore records without a node value, not possible?
            my @nodes;

            # Noderange expansions are memoized for up to 5 seconds,
            # keyed on the literal entry text.
            my $entry = $data->{$nodekey};
            unless ($self->{nrcache}->{$entry} and (($self->{nrcache}->{$entry}->{tstamp} + 5) > time())) {
                my @expanded = xCAT::NodeRange::noderange($entry);
                $self->{nrcache}->{$entry}->{value}  = \@expanded;
                $self->{nrcache}->{$entry}->{tstamp} = time();
            }

            # expand node entry, to make groups expand
            @nodes = @{ $self->{nrcache}->{$entry}->{value} };
            unless (@nodes) {    # entry not in nodelist: use the value verbatim
                @nodes = ($entry);
            }
            foreach my $nde (@nodes)
            {
                next if $donenodes{$nde};

                # Group-inheritance logic lives in getNodeAttribs.
                my @attrs = $self->getNodeAttribs($nde, $attribq);

                # Populate the node attribute by default; this sort of
                # expansion essentially requires it.
                foreach my $att (@attrs) {
                    $att->{$nodekey} = $nde;
                }
                $donenodes{$nde} = 1;
                if ($hashretstyle) {
                    $rethash->{$nde} = \@attrs;
                } else {
                    push @results, @attrs;
                }
            }
        }
    }
    $self->{_use_cache}             = 0;
    $self->{nodelist}->{_use_cache} = 0;
    $query->finish();
    if ($hashretstyle) {
        return $rethash;
    } else {
        return @results;
    }
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
=head3 getAllAttribs
2007-10-26 22:44:33 +00:00
Description: Returns a list of records in the input table for the input
list of attributes .
Arguments:
Table handle
List of attributes
Returns:
Array of attribute values
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
$ nodelisttab = xCAT::Table - > new ( "nodelist" ) ;
my @ attribs = ( "node" ) ;
@ nodes = $ nodelisttab - > getAllAttribs ( @ attribs ) ;
Comments:
none
=cut
#--------------------------------------------------------------------------------
sub getAllAttribs
{
    # Return every enabled row of the table, reduced to the requested
    # attribute columns.  Rows in which none of the requested attributes
    # are set are omitted from the result.
    my $self = shift;

    # Delegate to the db worker process when one is mediating DB access.
    if ($dbworkerpid) {
        return dbc_call($self, 'getAllAttribs', @_);
    }
    my @attribs = @_;
    my @results = ();
    if ($self->{_use_cache}) {

        # NEVER use a cache older than 5 seconds
        if ($self->{_cachestamp} < (time() - 5)) {
            $self->_refresh_cache();
        }
        my @cached_hits;
        foreach my $cacheline (@{ $self->{_tablecache} }) {
            my %rethash;
            foreach my $attrib (@attribs)
            {
                # Skip unset fields so they come back undef in the row.
                unless ($cacheline->{$attrib} =~ /^$/ || !defined($cacheline->{$attrib}))
                {
                    $rethash{$attrib} = $cacheline->{$attrib};
                }
            }
            if (keys %rethash)
            {
                push @cached_hits, \%rethash;
            }
        }
        if (@cached_hits)
        {
            return @cached_hits;
        }

        # NOTE(review): this yields (undef) in list context while the
        # uncached path below returns an empty list -- historical quirk
        # preserved for caller compatibility.
        return undef;
    }

    # delimit the disable column based on the DB
    my $disable = &delimitcol("disable");
    my $query;
    my $qstring = "SELECT * FROM " . $self->{tabname}
      . " WHERE " . $disable . " is NULL or " . $disable . " in ('0','no','NO','No','nO')";
    $query = $self->{dbh}->prepare($qstring);
    $query->execute();
    while (my $data = $query->fetchrow_hashref())
    {
        my %newrow = ();
        foreach my $attrib (@attribs)
        {
            # The reason we do this is to undef fields in rows that may
            # still be returned.
            unless ($data->{$attrib} =~ /^$/ || !defined($data->{$attrib}))
            {
                $newrow{$attrib} = $data->{$attrib};
            }
        }
        if (keys %newrow)
        {
            push(@results, \%newrow);
        }
    }
    $query->finish();
    return @results;
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
=head3 delEntries
2007-10-26 22:44:33 +00:00
Description: Delete table entries
Arguments:
Table Handle
Entry to delete
Returns:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
2012-05-11 11:40:38 +00:00
my $ table = xCAT::Table - > new ( "nodelist" ) ;
my % keyhash ;
$ keyhash { node } = "node1" ;
$ keyhash { groups } = "compute1" ;
$ table - > delEntries ( \ % keyhash ) ;
2012-05-11 11:55:08 +00:00
$ table - > commit ;
2012-05-11 11:40:38 +00:00
Build delete statement and ' ing the elements of the hash
DELETE FROM nodelist WHERE ( "groups" = "compute1" AND "node" = "node1" )
2012-05-13 10:36:16 +00:00
If called with no attributes , it will delete all entries in the table .
$ table - > delEntries ( ) ;
$ table - > commit ;
2007-10-26 22:44:33 +00:00
Comments:
none
=cut
#--------------------------------------------------------------------------------
sub delEntries
{
    # Delete table rows matching a key/value hash ref, or any of an array
    # ref of such hashes (the pairs within one hash are ANDed, the hashes
    # are ORed).  Called with no argument, it deletes every row in the
    # table.  Work is batched 100 key-sets per statement, and registered
    # listeners are handed the soon-to-be-deleted rows before each batch.
    my $self = shift;

    # Delegate to the db worker process when one is mediating DB access.
    if ($dbworkerpid) {
        return dbc_call($self, 'delEntries', @_);
    }
    my $keyref = shift;
    my @all_keypairs;

    if (not $self->{intransaction} and not $self->{autocommit} and $self->{realautocommit}) {

        # search this code for the other if statement just like it for an
        # explanation of why this is done
        $self->{intransaction}     = 1;
        $self->{dbh}->{AutoCommit} = 0;
    }
    if (ref($keyref) eq 'ARRAY')
    {
        @all_keypairs = @{$keyref};
    } else {
        push @all_keypairs, $keyref;
    }

    my $notif = xCAT::NotifHandler->needToNotify($self->{tabname}, 'd');

    # Process the key-set list in slices of 100 to keep statements bounded.
    my $record_num = 100;
    my @batch = splice(@all_keypairs, 0, $record_num);
    while (@batch) {
        my @notif_data;
        if ($notif == 1)
        {
            # Fetch the rows about to be removed so they can be passed to
            # the notification subscribers.
            my $qstring = "SELECT * FROM " . $self->{tabname};
            if ($keyref) { $qstring .= " WHERE "; }
            my @qargs = ();
            foreach my $keypairs (@batch) {
                $qstring .= "(";
                foreach my $keypair (keys %{$keypairs})
                {
                    # delimit the columns of the table
                    my $dkeypair = &delimitcol($keypair);
                    $qstring .= "$dkeypair = ? AND ";
                    push @qargs, $keypairs->{$keypair};
                }
                $qstring =~ s/ AND \z//;
                $qstring .= ") OR ";
            }
            $qstring =~ s/\(\)//;
            $qstring =~ s/ OR \z//;
            my $query = $self->{dbh}->prepare($qstring);
            $query->execute(@qargs);

            # prepare the notification data: column names go at the very front
            push(@notif_data, $query->{NAME});
            my $temp_data = $query->fetchall_arrayref();
            foreach (@$temp_data)
            {
                push(@notif_data, $_);
            }
            $query->finish();
        }

        # Build and run the DELETE mirroring the SELECT above.
        my @stargs    = ();
        my $delstring = 'DELETE FROM ' . $self->{tabname};
        if ($keyref) { $delstring .= ' WHERE '; }
        foreach my $keypairs (@batch) {
            $delstring .= "(";
            foreach my $keypair (keys %{$keypairs})
            {
                my $dkeypair = &delimitcol($keypair);
                $delstring .= $dkeypair . ' = ? AND ';
                if (ref($keypairs->{$keypair}))
                {    # XML-transformed data may come in mangled unreasonably into listrefs
                    push @stargs, $keypairs->{$keypair}->[0];
                }
                else
                {
                    push @stargs, $keypairs->{$keypair};
                }
            }
            $delstring =~ s/ AND \z//;
            $delstring .= ") OR ";
        }
        $delstring =~ s/\(\)//;
        $delstring =~ s/ OR \z//;
        my $stmt = $self->{dbh}->prepare($delstring);
        $stmt->execute(@stargs);
        $stmt->finish;
        $self->_refresh_cache();    # cache is invalid, refresh

        # notify the interested parties
        if ($notif == 1)
        {
            xCAT::NotifHandler->notify("d", $self->{tabname}, \@notif_data, {});
        }
        @batch = splice(@all_keypairs, 0, $record_num);
    }
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
=head3 getAttribs
2007-10-26 22:44:33 +00:00
Description:
Arguments:
key
List of attributes
Returns:
2008-02-21 21:10:35 +00:00
Hash of requested attributes
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
$ table = xCAT::Table - > new ( 'passwd' ) ;
2011-01-12 18:54:19 +00:00
@ tmp = $ table - > getAttribs ( { 'key' = > 'ipmi' } , ( 'username' , 'password' ) ) ;
2007-10-26 22:44:33 +00:00
Comments:
none
=cut
#--------------------------------------------------------------------------------
sub getAttribs
{
    # Look up rows matching a set of key/value pairs and return only the
    # requested attributes from each matching, non-disabled row.
    #
    # Arguments:
    #   - hash reference of key => value pairs to match (an undef/empty
    #     value matches SQL NULL)
    #   - the attribute names wanted, either as a list or as one array ref
    # Returns:
    #   In list context: all matching rows, each as a hash ref containing
    #   only the requested attributes that were non-empty.
    #   In scalar context: the first such row.
    #   undef when nothing matches.
    #
    #Takes two arguments:
    #-Key(s) name (will be compared against the table key(s) value)
    #-List reference of attributes for which calling code wants at least one of defined
    # (recurse argument intended only for internal use.)
    # Returns a hash reference with requested attributes defined.
    my $self = shift;

    # Proxy the call through the db worker process when one is running.
    if ($dbworkerpid) {
        return dbc_call($self, 'getAttribs', @_);
    }

    #my $key = shift;
    #my $keyval = shift;
    my %keypairs = %{ shift() };
    my @attribs;
    if (ref $_[0]) {    # attributes passed as a single array reference
        @attribs = @{ shift() };
    } else {            # attributes passed as a plain list
        @attribs = @_;
    }
    my @return;
    if ($self->{_use_cache}) {
        if ($self->{_cachestamp} < (time() - 5)) {    #NEVER use a cache older than 5 seconds
            $self->_refresh_cache();
        }
        my @results;
        my $cacheline;
        if (scalar(keys %keypairs) == 1 and $keypairs{node}) {    #99.9% of queries look like this, optimized case
            # Per-node index avoids scanning the whole table cache.
            foreach $cacheline (@{ $self->{_nodecache}->{ $keypairs{node} } }) {
                my $attrib;
                my %rethash;
                foreach $attrib (@attribs)
                {
                    # Skip attributes that are empty-string or undef.
                    unless ($cacheline->{$attrib} =~ /^$/ || !defined($cacheline->{$attrib}))
                    {    #To undef fields in rows that may still be returned
                        $rethash{$attrib} = $cacheline->{$attrib};
                    }
                }
                if (keys %rethash)
                {
                    push @results, \%rethash;
                }
            }
        } else {    #SLOW WAY FOR GENERIC CASE
            # Full scan of the table cache, matching every key pair.
          CACHELINE: foreach $cacheline (@{ $self->{_tablecache} }) {
                foreach (keys %keypairs) {
                    # NOTE(review): 'ne 0' is a string comparison here, so a
                    # requested value of "0" is treated as a real match
                    # requirement rather than an empty one — confirm intended.
                    if (not $keypairs{$_} and $keypairs{$_} ne 0 and $cacheline->{$_}) {
                        next CACHELINE;
                    }
                    unless ($keypairs{$_} eq $cacheline->{$_}) {
                        next CACHELINE;
                    }
                }
                my $attrib;
                my %rethash;
                foreach $attrib (@attribs)
                {
                    unless ($cacheline->{$attrib} =~ /^$/ || !defined($cacheline->{$attrib}))
                    {    #To undef fields in rows that may still be returned
                        $rethash{$attrib} = $cacheline->{$attrib};
                    }
                }
                if (keys %rethash)
                {
                    push @results, \%rethash;
                }
            }
        }
        if (@results)
        {
            return wantarray ? @results : $results[0];
        }
        return undef;
    }

    #print "Uncached access to ".$self->{tabname}."\n";
    # Uncached path: build a parameterized SELECT from the key pairs.
    my $statement = 'SELECT * FROM ' . $self->{tabname} . ' WHERE ';
    my @exeargs;
    foreach (keys %keypairs)
    {
        # Quote the column name for the running database.
        my $dkeypair = &delimitcol($_);
        if ($keypairs{$_})
        {
            $statement .= $dkeypair . " = ? and ";
            if (ref($keypairs{$_}))
            {    #correct for XML process mangling if occurred
                push @exeargs, $keypairs{$_}->[0];
            }
            else
            {
                push @exeargs, $keypairs{$_};
            }
        }
        else
        {
            # Empty/undef requested value matches SQL NULL.
            $statement .= $dkeypair . " is NULL and ";
        }
    }

    # delimit the disable column based on the DB
    my $disable = &delimitcol("disable");

    # Exclude rows explicitly disabled (any spelling of "no"/"0").
    $statement .= "(" . $disable . " is NULL or " . $disable . " in ('0','no','NO','No','nO'))";

    #print "This is my statement: $statement \n";
    my $query = $self->{dbh}->prepare($statement);
    unless (defined $query) {
        return undef;    # prepare failed; caller sees "no match"
    }
    $query->execute(@exeargs);
    my $data;
    while ($data = $query->fetchrow_hashref())
    {
        my $attrib;
        my %rethash;
        foreach $attrib (@attribs)
        {
            unless ($data->{$attrib} =~ /^$/ || !defined($data->{$attrib}))
            {    #To undef fields in rows that may still be returned
                $rethash{$attrib} = $data->{$attrib};
            }
        }
        if (keys %rethash)
        {
            push @return, \%rethash;
        }
    }
    $query->finish();
    if (@return)
    {
        return wantarray ? @return : $return[0];
    }
    return undef;
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head3 getTable
2007-10-26 22:44:33 +00:00
Description: Read entire Table
Arguments:
Table Handle
Returns:
2008-02-21 21:10:35 +00:00
Array of table rows
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
my $ table = xCAT::Table - > new ( "notification" , - create = > 0 ) ;
my @ row_array = $ table - > getTable ;
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub getTable
{
    # Get contents of table
    # Takes no arguments
    # Returns an array of hashes containing the entire contents of this
    # table. Each array entry contains a pointer to a hash which is
    # one row of the table. The row hash is keyed by attribute name.
    # Returns an empty list (undef in scalar context) for an empty table.
    my $self = shift;

    # Proxy the call through the db worker process when one is running.
    if ($dbworkerpid) {
        return dbc_call($self, 'getTable', @_);
    }
    my @return;
    my $statement = 'SELECT * FROM ' . $self->{tabname};
    my $query     = $self->{dbh}->prepare($statement);
    $query->execute();
    while (my $data = $query->fetchrow_hashref())
    {
        # Shallow-copy each row so the returned hash refs are independent
        # of any buffer DBI may reuse between fetches.
        push @return, {%$data} if keys %$data;
    }
    $query->finish();
    return @return if @return;

    # Bare return: undef in scalar context, empty list in list context.
    # The previous 'return undef' handed list-context callers (the
    # documented usage) a one-element list containing undef.
    return;
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head3 close
2007-10-26 22:44:33 +00:00
Description: Close out Table transaction
Arguments:
2008-02-21 21:10:35 +00:00
Table Handle
2007-10-26 22:44:33 +00:00
Returns:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
my $ mactab = xCAT::Table - > new ( 'mac' ) ;
$ mactab - > setNodeAttribs ( $ macmap { $ mac } , { mac = > $ mac } ) ;
$ mactab - > close ( ) ;
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub close
{
    # Close out this table's transaction scope by releasing the companion
    # nodelist table handle that accompanies every table object. The
    # database handle itself is deliberately left open (see the
    # commented-out lines); it is shared and managed elsewhere.
    my $self = shift;

    #if ($self->{dbh}) { $self->{dbh}->disconnect(); }
    #undef $self->{dbh};
    if ($self->{tabname} ne 'nodelist') {
        # Recursively close the companion nodelist handle.
        return $self->{nodelist}->close();
    }

    # We ARE the nodelist table: just drop the self-reference.
    undef $self->{nodelist};
}
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head3 open
2007-10-26 22:44:33 +00:00
Description: Connect to Database
Arguments:
2008-02-21 21:10:35 +00:00
Empty Hash
2007-10-26 22:44:33 +00:00
Returns:
2008-02-21 21:10:35 +00:00
Data Base Handle
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Comments:
none
= cut
#--------------------------------------------------------------------------------
2009-08-04 18:38:08 +00:00
#UNUSED FUNCTION
#sub open
#{
# my $self = shift;
# $self->{dbh} = DBI->connect($self->{connstring}, "", "");
#}
2007-10-26 22:44:33 +00:00
#--------------------------------------------------------------------------
2008-02-21 21:10:35 +00:00
= head3 DESTROY
2007-10-26 22:44:33 +00:00
Description: Disconnect from Database
Arguments:
2008-02-21 21:10:35 +00:00
Database Handle
2007-10-26 22:44:33 +00:00
Returns:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Globals:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Error:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Example:
2008-02-21 21:10:35 +00:00
2007-10-26 22:44:33 +00:00
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub DESTROY
{
    # Destructor: drop this object's references so Perl can reclaim it.
    my $self = shift;

    # Overwrite then undef the database handle reference; the handle's
    # actual disconnect is handled elsewhere (see the commented-out line).
    $self->{dbh} = '';
    undef $self->{dbh};
    #if ($self->{dbh}) { $self->{dbh}->disconnect(); undef $self->{dbh};}
    undef $self->{nodelist};    #Could be circular
}
2008-02-21 21:10:35 +00:00
= head3 getTableList
Description: Returns a list of the table names in the xCAT database .
= cut
sub getTableList { return keys % xCAT:: Schema:: tabspec ; }
= head3 getTableSchema
Description: Returns the db schema for the specified table .
Returns: A reference to a hash that contains the cols , keys , etc . for this table . ( See Schema . pm for details . )
= cut
sub getTableSchema { return $ xCAT:: Schema:: tabspec { $ _ [ 1 ] } ; }
= head3 getDescriptions
Description: Returns a summary description for each table .
Returns: A reference to a hash . Each key is the table name .
Each value is the table description .
= cut
sub getDescriptions {
    # Static/class method: build a hash ref mapping every schema table
    # name to its table_desc summary text.
    my $classname = shift;    # we ignore this because this function is static
    my %desc_for;
    foreach my $tab (keys %xCAT::Schema::tabspec) {
        $desc_for{$tab} = $xCAT::Schema::tabspec{$tab}->{table_desc};
    }
    return \%desc_for;
}
2008-08-26 13:43:31 +00:00
2009-02-04 01:53:34 +00:00
#--------------------------------------------------------------------------
2008-08-26 13:43:31 +00:00
= head3 isAKey
Description: Checks to see if table field is a table key
Arguments:
Table field
List of keys
Returns:
1 = is a key
0 = not a key
Globals:
Error:
Example:
if ( isaKey ( $ key_list , $ col ) ) ;
= cut
#--------------------------------------------------------------------------------
sub isAKey
{
    # Check whether a column name is one of a table's key columns.
    #
    # Arguments:
    #   $keys - array reference of key column names
    #   $col  - column name to test
    # Returns:
    #   1 = is a key
    #   0 = not a key
    my ($keys, $col) = @_;

    # grep in scalar context counts matches; normalize to the documented
    # 0/1 return values instead of hand-rolling the search loop.
    return (grep { $_ eq $col } @$keys) ? 1 : 0;
}
2008-02-21 21:10:35 +00:00
2009-02-04 01:53:34 +00:00
#--------------------------------------------------------------------------
= head3 getAutoIncrementColumns
get a list of column names that are of type "INTEGER AUTO_INCREMENT" .
Returns:
an array of column names that are auto increment .
= cut
#--------------------------------------------------------------------------------
sub getAutoIncrementColumns {
    # Return the names of this table's columns whose declared schema type
    # is "INTEGER AUTO_INCREMENT", preserving schema column order.
    my $self  = shift;
    my $descr = $xCAT::Schema::tabspec{ $self->{tabname} };    # schema entry for this table
    my $types = $descr->{types};                               # per-column type map (may be absent)
    my @autocols;
    foreach my $col (@{ $descr->{cols} })
    {
        if ($types && $types->{$col} && $types->{$col} =~ /INTEGER AUTO_INCREMENT/) {
            push @autocols, $col;
        }
    }
    return @autocols;
}
2010-08-23 18:03:45 +00:00
#--------------------------------------------------------------------------
= head3
Description: get_filelist
Arguments:
directory , filelist , type
Returns:
The list of sql files to be processed which consists of all the
2010-08-24 12:24:45 +00:00
files with <name> . sql and <name> _ <databasename> . sql
2010-08-23 18:03:45 +00:00
or
2010-08-24 12:24:45 +00:00
files with <name> . pm and <name> _ <databasename> . pm
2010-08-23 18:03:45 +00:00
Globals:
Error:
Example:
my @ filelist = get_filelist ( $ directory , $ filelist , $ type ) ;
where type = "sql" or "pm"
2010-08-25 15:18:04 +00:00
Note: either input a directory path in $ directory or an array of
full path to filenames in $ filelist . See runsqlcmd for example .
2010-08-23 18:03:45 +00:00
= cut
#--------------------------------------------------------------------------------
sub get_filelist
{
    # Select the .sql or .pm files that apply to the running database.
    # A file named <name>.<ext> always qualifies; a file named
    # <name>_<suffix>.<ext> qualifies when <suffix> matches the current
    # database name, or when it is not any database name (i.e. the
    # underscore is simply part of the file name).
    #
    # Arguments:
    #   $directory - directory to scan for *.$ext files
    #   $files     - array ref of explicit full file paths (overrides $directory)
    #   $ext       - "sql" or "pm"
    # Returns:
    #   the list of qualifying file paths
    use File::Basename;
    my $self      = shift;
    my $directory = shift;
    my $files     = shift;
    my $ext       = shift;

    # Derive the short database name used in file suffixes from the
    # connection string prefix; sqlite is the default.
    my $dbname  = "sqlite";
    my $xcatcfg = get_xcatcfg();
    if ($xcatcfg =~ /^DB2:/)
    {
        $dbname = "db2";
    }
    elsif ($xcatcfg =~ /^mysql:/)
    {
        $dbname = "mysql";
    }
    elsif ($xcatcfg =~ /^Pg:/)
    {
        $dbname = "pgsql";
    }
    $directory .= "/";

    # An explicit file list takes precedence over a directory scan.
    my @list;
    if (@$files) {
        @list = @$files;
    } else {
        @list = glob($directory . "*.$ext");    # all files
    }

    my @filelist = ();
    foreach my $file (@list)
    {
        my $filename = basename($file);              # strip the directory part
        my ($name, $ext1) = split '\.', $filename;   # name without extension

        #my($tmpname,$ext2) = split '\_', $name;
        my @parts = split '\_', $name;
        my $ext2  = $parts[-1];                      # last _-separated piece = candidate db suffix
        if ($ext2 eq $dbname)
        {
            push @filelist, $file;                   # suffix matches the running database
        }
        elsif ($ext2 eq "")
        {
            push @filelist, $file;                   # no suffix at all
        }
        elsif ($ext2 ne "db2" && $ext2 ne "mysql" && $ext2 ne "pgsql" && $ext2 ne "sqlite")
        {
            # Not one of the databases: the underscore is just part of
            # the file name, so the file applies to every database.
            push @filelist, $file;
        }
        $ext2 = "";
        $ext1 = "";
    }
    return @filelist;
}
2011-01-10 17:50:11 +00:00
#--------------------------------------------------------------------------
= head3
Description: delimitcol
Arguments:
attribute name
Returns:
The attribute ( column )
delimited appropriately for the running Database
Globals:
Error:
Example:
2009-02-04 01:53:34 +00:00
2011-01-10 17:50:11 +00:00
my $ delimitedcol = delimitcol ( $ col ) ;
= cut
#--------------------------------------------------------------------------------
sub delimitcol {
    # Quote a column (attribute) name appropriately for the running
    # database: double quotes for DB2/PostgreSQL, backticks for MySQL,
    # and no delimiting at all for SQLite.
    #
    # Arguments: attribute (column) name
    # Returns:   the delimited attribute name
    my $attrin  = shift;            # input attribute name
    my $xcatcfg = get_xcatcfg();    # connection string identifies the DB
    if ($xcatcfg =~ /^DB2:/ || $xcatcfg =~ /^Pg:/) {
        return "\"$attrin\"";       # ANSI double-quoted identifier
    }
    if ($xcatcfg =~ /^mysql:/) {
        return "\`$attrin\`";       # MySQL backtick identifier
    }
    return $attrin;                 # sqlite: pass through unchanged
}
2011-03-22 18:45:45 +00:00
#--------------------------------------------------------------------------
= head3
Description: buildwhereclause
Arguments:
Array of the following
attr <operator> val where the operator can be the following:
==
!=
=~
!~
>
<
>=
<=
Returns:
Where clause with SQL appropriate for the running DB
Globals:
Error:
Example:
my $ whereclause = buildWhereClause ( @ array ) ;
= cut
#--------------------------------------------------------------------------------
sub buildWhereClause {
    # Build a SQL WHERE clause from "attr<operator>val" strings, joined
    # with AND. Supported operators: == != =~ !~ > < >= <=  (=~ and !~
    # become SQL LIKE / NOT LIKE, with surrounding slashes stripped).
    # Column names are delimited for the running database; an unsupported
    # operator logs an error and returns "".
    #
    # NOTE(review): $val is interpolated into the clause between single
    # quotes without escaping — input is expected to come from the admin's
    # -w flag, not untrusted users.
    my $attrvalstr = shift;    # array of atr<op>val strings
    my $whereclause;           # Where Clause
    my $firstpass = 1;
    foreach my $m (@{$attrvalstr})
    {
        my $attr;
        my $val;
        my $operator;
        if ($firstpass == 1) {    # first pass no AND
            $firstpass = 0;
        } else {                  # add an AND
            $whereclause .= " AND ";
        }

        # Operator dispatch. The order of these tests matters: two-char
        # operators (<=, >=) must be recognized before their one-char
        # prefixes (<, >).
        if ($m =~ /^[^=]*\==/) {    #attr==val
            ($attr, $val) = split /==/, $m, 2;
            $operator = ' = ';
        } elsif ($m =~ /^[^=]*=~/) {    #attr=~val
            ($attr, $val) = split /=~/, $m, 2;
            $val =~ s/^\///;            # strip leading slash of /pattern/
            $val =~ s/\/$//;            # strip trailing slash
            $operator = ' like ';
        } elsif ($m =~ /^[^=]*\!=/) {    #attr!=val
            ($attr, $val) = split /!=/, $m, 2;
            $operator = ' != ';
        } elsif ($m =~ /[^=]*!~/) {      #attr!~val
            ($attr, $val) = split /!~/, $m, 2;
            $val =~ s/^\///;
            $val =~ s/\/$//;
            $operator = ' not like ';
        } elsif ($m =~ /^[^=]*\<=/) {    #attr<=val
            ($attr, $val) = split /<=/, $m, 2;
            $operator = ' <= ';
        } elsif ($m =~ /^[^=]*\</) {     #attr<val
            ($attr, $val) = split /</, $m, 2;
            $operator = ' < ';
        } elsif ($m =~ /^[^=]*\>=/) {    #attr>=val
            ($attr, $val) = split />=/, $m, 2;
            $operator = ' >= ';
        } elsif ($m =~ /^[^=]*\>/) {     #attr>val
            ($attr, $val) = split />/, $m, 2;
            $operator = ' > ';
        } else {
            # Unrecognized operator: log to syslog and give back an empty
            # clause so the caller can abort the query.
            xCAT::MsgUtils->message("S", "Unsupported operator:$m on -w flag input, could not build a Where Clause.");
            $whereclause = "";
            return $whereclause;
        }

        # Delimit the column name for the running database.
        my $delimitedattr = &delimitcol($attr);
        $whereclause .= $delimitedattr;
        $whereclause .= $operator;
        #$whereclause .="(\'";
        $whereclause .= "\'";
        $whereclause .= $val;
        #$whereclause .="\')";
        $whereclause .= "\'";
    }
    return $whereclause;
}
2011-05-11 17:50:08 +00:00
#--------------------------------------------------------------------------
= head3 writeAllEntries
Description: Read entire table and writes all entries to file
This routine was written specifically for the tabdump
command .
Arguments:
filename or path
Returns:
0 = good
1 = bad
Globals:
Error:
Example:
my $ tabh = xCAT::Table - > new ( $ table ) ;
my $ recs = $ tabh - > writeAllEntries ( $ filename ) ;
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub writeAllEntries
{
    # Dump every row of this table — disabled rows included — to $filename
    # in tabdump CSV format: a "#col1,col2,..." header line followed by one
    # CSV line per row. Written specifically for the tabdump command.
    #
    # Arguments:
    #   filename or path
    # Returns:
    #   0 = good (return code of the last output_table call)
    #   1 = bad  (file could not be opened for writing)
    my $self = shift;
    if ($dbworkerpid) {
        return dbc_call($self, 'writeAllEntries', @_);
    }
    my $filename = shift;
    my $fh;
    my $rc;

    # Three-arg open: the old two-arg form (" > $filename") allowed mode
    # characters embedded in the path to change how the file was opened.
    unless (open($fh, '>', $filename)) {
        my $msg = "Unable to open $filename for write \n.";

        # NOTE(review): $msg is interpolated into a shell command; the path
        # is admin-supplied, but a list-form system() would be safer.
        `logger -p local4.err -t xcat $msg`;
        return 1;
    }
    my $query;
    my $header;
    my $tabdump_header = sub {
        $header = "#" . join(",", @_);
    };
    $tabdump_header->(@{ $self->{colnames} });

    # write the header to the file
    print $fh $header;    # write line to file
    print $fh "\n";

    # delimit the disable column based on the DB (kept for parity with
    # writeAllAttribsWhere; this unfiltered dump does not use it)
    my $disable = &delimitcol("disable");
    $query = $self->{dbh}->prepare('SELECT * FROM ' . $self->{tabname});
    $query->execute();
    while (my $data = $query->fetchrow_hashref())
    {
        # Normalize empty strings to undef so output_table writes them as
        # empty (unquoted) CSV fields.
        foreach (keys %$data)
        {
            if ($data->{$_} =~ /^$/)
            {
                $data->{$_} = undef;
            }
        }
        $rc = output_table($self->{tabname}, $fh, $self, $data);
    }
    $query->finish();

    # CORE::close, because this package defines its own close() method.
    CORE::close($fh);
    return $rc;
}
#--------------------------------------------------------------------------
= head3 writeAllAttribsWhere
Description: writes all attributes to file using the "where" clause
written for the tabdump command
Arguments:
array of attr <operator> val strings to be build into a Where clause
filename or path
Returns:
Outputs to filename the table header and rows
Globals:
Error:
Example:
$ nodelist - > getAllAttribsWhere ( array of attr <operator> val , $ filename ) ;
where operator can be
( == , != , =~ , !~ , > , < , >= , <= )
Comments:
none
= cut
#--------------------------------------------------------------------------------
sub writeAllAttribsWhere
{
    # Write the header plus every enabled row matching a caller-supplied
    # "where" specification to $filename in tabdump CSV format.
    # Written for the tabdump command.
    #
    # Arguments:
    #   $clause   - array ref of "attr<operator>val" strings for
    #               buildWhereClause (operators: ==, !=, =~, !~, >, <, >=, <=)
    #   $filename - output file or path
    # Returns:
    #   0 = good (return code of the last output_table call)
    #   1 = bad  (file could not be opened for writing)
    #Takes a list of attributes, returns all records in the table.
    my $self = shift;
    if ($dbworkerpid) {
        return dbc_call($self, 'writeAllAttribsWhere', @_);
    }
    my $clause   = shift;
    my $filename = shift;
    my $whereclause;
    my @attribs = @_;
    my @results = ();
    my $query;
    my $query2;
    my $fh;
    my $rc;

    # Three-arg open: the old two-arg form (" > $filename") allowed mode
    # characters embedded in the path to change how the file was opened.
    unless (open($fh, '>', $filename)) {
        my $msg = "Unable to open $filename for write \n.";
        `logger -p local4.err -t xcat $msg`;
        return 1;
    }
    my $header;
    my $tabdump_header = sub {
        $header = "#" . join(",", @_);
    };
    $tabdump_header->(@{ $self->{colnames} });

    # write the header to the file
    print $fh $header;    # write line to file
    print $fh "\n";
    $whereclause = &buildWhereClause($clause);

    # delimit the disable column based on the DB; disabled rows (any
    # spelling of "no"/"0") are excluded from the dump.
    my $disable = &delimitcol("disable");
    $query2 = 'SELECT * FROM ' . $self->{tabname} . ' WHERE (' . $whereclause . ") and ($disable is NULL or $disable in ('0','no','NO','No','nO'))";
    $query  = $self->{dbh}->prepare($query2);
    $query->execute();
    while (my $data = $query->fetchrow_hashref())
    {
        # Normalize empty strings to undef so output_table writes them as
        # empty (unquoted) CSV fields.
        foreach (keys %$data) {
            if ($data->{$_} =~ /^$/)
            {
                $data->{$_} = undef;
            }
        }
        $rc = output_table($self->{tabname}, $fh, $self, $data);
    }
    $query->finish();

    # CORE::close, because this package defines its own close() method.
    CORE::close($fh);
    return $rc;
}
#--------------------------------------------------------------------------
= head3 output_table
Description: writes table rows to file
written for the tabdump command
= cut
#--------------------------------------------------------------------------------
sub output_table {
    # Format one table row as a CSV line (tabdump format) and write it to
    # the given filehandle. Defined values are double-quoted with embedded
    # double quotes doubled; undefined values become empty fields.
    # Always returns 0.
    #
    # Arguments:
    #   $table - table name (unused here; kept for the call signature)
    #   $fh    - open filehandle to write to
    #   $tabh  - table object/hash providing {colnames}
    #   $rec   - hash ref of one row, keyed by column name
    my ($table, $fh, $tabh, $rec) = @_;
    my @fields;
    foreach my $col (@{ $tabh->{colnames} })
    {
        if (defined $rec->{$col})
        {
            # CSV-escape in place (matches historical behavior of
            # mutating the caller's row), then quote the value.
            $rec->{$col} =~ s/"/""/g;
            push @fields, '"' . $rec->{$col} . '"';
        }
        else
        {
            push @fields, '';    # undef -> empty field
        }
    }
    print $fh join(',', @fields);    # write line to file
    print $fh "\n";
    return 0;
}
2007-10-26 22:44:33 +00:00
1 ;