Change update strategy. The previous strategy of aggregated UPDATE statements achieved the lowest number of executed statements, but required a prepare per execute. The new strategy uses a single prepare; it does not reduce the number of executes, but one prepare plus thousands of executes is cheaper than a few dozen prepares plus executes.
Final tally for a particular stress test:
before this change: real 0m48.731s, real 0m54.434s
after this change: real 0m3.231s, real 0m2.976s

git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/trunk@4675 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
parent c1a0633419
commit 577661b712
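The tradeoff described in the message is the usual DBI one: prepare() carries the per-statement overhead, while execute() on an already-prepared handle with fresh bind values is cheap. Below is a minimal sketch of the prepare-once/execute-many pattern, using DBD::SQLite and a made-up two-column table rather than the real xCAT schema or its $self->{dbh} handle.

#!/usr/bin/perl
use strict;
use warnings;
use DBI;

# Toy in-memory database standing in for the real xCAT table handle.
my $dbh = DBI->connect("dbi:SQLite:dbname=:memory:", "", "", { RaiseError => 1 });
$dbh->do("CREATE TABLE nodelist (node TEXT PRIMARY KEY, status TEXT)");
$dbh->do("INSERT INTO nodelist VALUES ('n1','noop')");
$dbh->do("INSERT INTO nodelist VALUES ('n2','noop')");

# One prepare for the whole run...
my $upsth = $dbh->prepare("UPDATE nodelist set status = ? where node = ?");

# ...then one cheap execute per node, instead of rebuilding and
# re-preparing an aggregated statement for every batch of nodes.
my %updatenodes = (n1 => 'booted', n2 => 'installing');
foreach my $node (keys %updatenodes) {
    $upsth->execute($updatenodes{$node}, $node);
}
$dbh->disconnect;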
@@ -1549,6 +1549,7 @@ sub setNodesAttribs {
     #at the moment anyway
     my @currnodes = splice(@$nodelist,0,$nodesatatime); #Do a few at a time to stay under max sql statement length and max variable count
     my $insertsth; #if insert is needed, this will hold the single prepared insert statement
+    my $upsth;
     while (scalar @currnodes) {
         my %updatenodes=();
         my %insertnodes=();
@@ -1595,23 +1596,24 @@ sub setNodesAttribs {
             }
             $insertsth->execute(@args);
         }
+        if (not $upsth and keys %updatenodes) { #prepare an update statement since one will be needed
+            my $upstring = "UPDATE ".$self->{tabname}." set ";
+            foreach my $col (@orderedcols) { #try aggregating requests. Could also see about single prepare, multiple executes instead
+                $upstring .= "$col = ?, ";
+            }
+            $upstring =~ s/, $/ where $nodekey = ?/;
+            $upsth = $self->{dbh}->prepare($upstring);
+        }
         if (scalar keys %updatenodes) {
-            my $upstring = "UPDATE ".$self->{tabname}." set ";
-            my @args=();
-            foreach my $col (@orderedcols) { #try aggregating requests. Could also see about single prepare, multiple executes instead
-                $upstring .= "$col = CASE $nodekey ";
-                foreach my $node (keys %updatenodes) {
-                    $upstring .= "when '$node' then ? ";
+            foreach my $node (keys %updatenodes) {
+                my @args=();
+                foreach my $col (@orderedcols) { #try aggregating requests. Could also see about single prepare, multiple executes instead
                     push @args,$hashrec->{$node}->{$col};
                 }
-                $upstring .= "END, ";
+                push @args,$node;
+                $upsth->execute(@args);
             }
-            $upstring =~ s/, $/ where $nodekey in (/;
-            $upstring .= "?,"x scalar(keys %updatenodes);
-            $upstring =~ s/,$/)/;
-            push @args,keys %updatenodes;
-            my $upsth = $self->{dbh}->prepare($upstring);
-            $upsth->execute(@args);
         }
         @currnodes = splice(@$nodelist,0,$nodesatatime);
     }
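For reference, this is roughly the statement text the new prepare block builds; the table, key, and column names below are illustrative, not the real schema pulled from xCAT's table definitions. The removed code instead interpolated node names into a per-batch CASE/IN aggregate, which is why it needed a fresh prepare for every batch.

use strict;
use warnings;

# Mirrors the string construction in the new prepare block (illustrative names).
my $tabname     = 'nodelist';
my $nodekey     = 'node';
my @orderedcols = ('status', 'comments');

my $upstring = "UPDATE ".$tabname." set ";
foreach my $col (@orderedcols) {
    $upstring .= "$col = ?, ";
}
$upstring =~ s/, $/ where $nodekey = ?/;

# Prints: UPDATE nodelist set status = ?, comments = ? where node = ?
print "$upstring\n";

# Each node then needs only: $upsth->execute(@column_values, $node);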