From df535ce36d2c2df2344df735c6cd2c39bee67eef Mon Sep 17 00:00:00 2001
From: jbjohnso
Date: Fri, 11 May 2012 14:09:29 +0000
Subject: [PATCH] Make sure nodelist never retains a nodelist cache longer than
 5 seconds under any condition

git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/branches/2.7@12625 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
---
 perl-xCAT/xCAT/NodeRange.pm | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/perl-xCAT/xCAT/NodeRange.pm b/perl-xCAT/xCAT/NodeRange.pm
index eb9ee8f1e..4c7e0a428 100644
--- a/perl-xCAT/xCAT/NodeRange.pm
+++ b/perl-xCAT/xCAT/NodeRange.pm
@@ -32,6 +32,7 @@ my $grptab;
 #my $nodeprefix = "node";
 my @allnodeset;
+my $allnodesetstamp;
 my %allnodehash;
 my @grplist;
 my $didgrouplist;
@@ -180,7 +181,8 @@ sub nodesbycriteria {
 sub expandatom { #TODO: implement table selection as an atom (nodetype.os==rhels5.3)
     my $atom = shift;
     if ($recurselevel > 4096) { die "NodeRange seems to be hung on evaluating $atom, recursion limit hit"; }
-    unless (scalar(@allnodeset)) { #Build a cache of all nodes, some corner cases will perform worse, but by and large it will do better. We could do tests to see where the breaking points are, and predict how many atoms we have to evaluate to mitigate, for now, implement the strategy that keeps performance from going completely off the rails
+    unless (scalar(@allnodeset) and (($allnodesetstamp+5) > time())) { #Build a cache of all nodes, some corner cases will perform worse, but by and large it will do better. We could do tests to see where the breaking points are, and predict how many atoms we have to evaluate to mitigate, for now, implement the strategy that keeps performance from going completely off the rails
+        $allnodesetstamp=time();
         @allnodeset = $nodelist->getAllAttribs('node','groups');
         %allnodehash = map { $_->{node} => 1 } @allnodeset;
     }
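
For reference, below is a minimal standalone Perl sketch (not part of the patch) of the timestamp-guarded cache pattern the change introduces: rebuild the node cache whenever it is empty or older than 5 seconds. The fetch_all_nodes() helper is a hypothetical stand-in for the real $nodelist->getAllAttribs('node','groups') call; the module itself keeps the cache in the package-level variables shown in the hunks above.

    use strict;
    use warnings;

    my @allnodeset;          # cached node rows
    my $allnodesetstamp = 0; # epoch seconds when the cache was last rebuilt

    # Hypothetical stand-in for the real nodelist table read.
    sub fetch_all_nodes {
        return (
            { node => 'n1', groups => 'all' },
            { node => 'n2', groups => 'all' },
        );
    }

    sub cached_nodes {
        # Rebuild when the cache is empty or older than 5 seconds,
        # mirroring the unless(...) guard added by the patch.
        unless (scalar(@allnodeset) and (($allnodesetstamp + 5) > time())) {
            $allnodesetstamp = time();
            @allnodeset      = fetch_all_nodes();
        }
        return @allnodeset;
    }

    my @nodes = cached_nodes();   # first call populates the cache
    @nodes = cached_nodes();      # calls within 5 seconds reuse the cached list

The fixed 5-second window bounds how stale the cache can get in a long-running process while still avoiding a table read on every atom expansion.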