More aggressively retain cache

git-svn-id: https://svn.code.sf.net/p/xcat/code/xcat-core/branches/2.7@12643 8638fb3e-16cb-4fca-ae20-7b5d299a9bcd
jbjohnso 2012-05-11 18:43:15 +00:00
parent 2ff852b466
commit c122edd400
2 changed files with 3 additions and 1 deletion

@@ -184,6 +184,7 @@ sub expandatom { #TODO: implement table selection as an atom (nodetype.os==rhels
     if ($recurselevel > 4096) { die "NodeRange seems to be hung on evaluating $atom, recursion limit hit"; }
     unless (scalar(@allnodeset) and (($allnodesetstamp+5) > time())) { #Build a cache of all nodes, some corner cases will perform worse, but by and large it will do better. We could do tests to see where the breaking points are, and predict how many atoms we have to evaluate to mitigate, for now, implement the strategy that keeps performance from going completely off the rails
         $allnodesetstamp=time();
+        $nodelist->_set_use_cache(1);
         @allnodeset = $nodelist->getAllAttribs('node','groups');
         %allnodehash = map { $_->{node} => 1 } @allnodeset;
     }

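Taken on its own, the change above is a time-windowed snapshot: keep every node row plus the time it was fetched, rebuild only when the snapshot is missing or more than five seconds old, and pin the table-level cache (the added _set_use_cache(1) call) while building it. A minimal sketch of that pattern follows; it reuses the variable names from the hunk, assumes $nodelist is an xCAT::Table handle for the nodelist table as in expandatom, and the refresh_allnodeset wrapper name is hypothetical, not xCAT code.

    use strict;
    use warnings;

    my @allnodeset;          # cached rows from getAllAttribs('node','groups')
    my %allnodehash;         # node name => 1, for cheap membership tests
    my $allnodesetstamp = 0; # epoch seconds when the snapshot was taken

    sub refresh_allnodeset {
        my ($nodelist) = @_;
        # Rebuild only when the snapshot is empty or older than 5 seconds.
        unless (scalar(@allnodeset) and (($allnodesetstamp + 5) > time())) {
            $allnodesetstamp = time();
            $nodelist->_set_use_cache(1);    # pin the underlying table cache
            @allnodeset  = $nodelist->getAllAttribs('node', 'groups');
            %allnodehash = map { $_->{node} => 1 } @allnodeset;
        }
        return \%allnodehash;
    }

The five-second window trades a bounded amount of staleness for avoiding a full-table query per atom evaluated, which is the trade-off the in-line comment in the hunk describes.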

@@ -2122,7 +2122,8 @@ sub _clear_cache { #PRIVATE FUNCTION TO EXPIRE CACHED DATA EXPLICITLY
         $self->{_cache_ref} -= 1;
         return;
     } elsif ($self->{_cache_ref} == 1) { #If it is 1, decrement to zero and carry on
-        $self->{_cache_ref} = 0;
+        return;
+        #$self->{_cache_ref} = 0;
     }
     #it shouldn't have been zero, but whether it was 0 or 1, ensure that the cache is gone
     $self->{_use_cache}=0; # Signal slow operation to any in-flight operations that may fail with empty cache
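
The second hunk adjusts reference-counted release of the table cache: _cache_ref appears to count holders of the cached data, and _clear_cache previously fell through and discarded the cache once the count reached one; with this commit the last holder returns early instead, so the data is retained for reuse. A self-contained sketch of that release logic follows. The CacheDemo package, the acquire_cache/release_cache names, and the $fetch callback are hypothetical stand-ins, not xCAT's Table.pm API; only the branch structure of the release path mirrors the hunk.

    package CacheDemo;
    use strict;
    use warnings;

    sub new { return bless { _cache_ref => 0, _use_cache => 0, cache => undef }, shift; }

    sub acquire_cache {
        my ($self, $fetch) = @_;
        $self->{cache} ||= $fetch->();    # first holder populates the data
        $self->{_cache_ref} += 1;
        $self->{_use_cache} = 1;
        return $self->{cache};
    }

    sub release_cache {
        my ($self) = @_;
        if ($self->{_cache_ref} > 1) {         # other holders remain: just decrement
            $self->{_cache_ref} -= 1;
            return;
        } elsif ($self->{_cache_ref} == 1) {   # last holder: keep the data and the count
            return;                            # (the "retain" behaviour added here)
        }
        # count was already zero: something is off, drop the cache entirely
        $self->{_use_cache} = 0;
        $self->{cache}      = undef;
    }

    1;

With the count never dropping back to zero on the normal path, the cached rows outlive a single acquire/release cycle; the intent appears to be that staleness is bounded by timestamp checks such as the five-second window in the first hunk rather than by the reference count.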