[Checkins] SVN: relstorage/trunk/relstorage/ Handle some more cache edge cases:
Shane Hathaway
shane at hathawaymix.org
Tue Oct 20 16:49:17 EDT 2009
Log message for revision 105180:
Handle some more cache edge cases:
- When copying to checkpoint0, copy to all caches, not
just the cache the object was found in.
- Never propagate SizeOverflow exceptions. They are
meaningless to higher layers.
- Don't blow up if cache_local_mb is 0.
Changed:
U relstorage/trunk/relstorage/cache.py
U relstorage/trunk/relstorage/tests/test_cache.py
-=-
Modified: relstorage/trunk/relstorage/cache.py
===================================================================
--- relstorage/trunk/relstorage/cache.py 2009-10-20 20:13:59 UTC (rev 105179)
+++ relstorage/trunk/relstorage/cache.py 2009-10-20 20:49:17 UTC (rev 105180)
@@ -153,10 +153,10 @@
for client in self.clients_local_first:
cache_data = client.get(cachekey)
if cache_data and len(cache_data) >= 8:
- # cache hit
+ # Cache hit.
assert cache_data[:8] == p64(tid_int)
return cache_data[8:], tid_int
- # cache miss
+ # Cache miss.
state, actual_tid_int = self.adapter.mover.load_current(
cursor, oid_int)
assert actual_tid_int == tid_int
@@ -188,7 +188,7 @@
if response:
cache_data = response.get(cp0_key)
if cache_data and len(cache_data) >= 8:
- # cache hit on the preferred cache key
+ # Cache hit on the preferred cache key.
return cache_data[8:], u64(cache_data[:8])
if da1_key:
@@ -196,12 +196,13 @@
elif cp1_key:
cache_data = response.get(cp1_key)
if cache_data and len(cache_data) >= 8:
- # cache hit, but copy the state to
+ # Cache hit, but copy the state to
# the currently preferred key.
- client.set(cp0_key, cache_data)
+ for client_to_set in self.clients_local_first:
+ client_to_set.set(cp0_key, cache_data)
return cache_data[8:], u64(cache_data[:8])
- # cache miss
+ # Cache miss.
state, tid_int = self.adapter.mover.load_current(cursor, oid_int)
if tid_int:
cache_data = '%s%s' % (p64(tid_int), state or '')
@@ -383,13 +384,11 @@
self.current_tid = new_tid_int
return
- cp0, cp1 = new_checkpoints
allow_shift = True
- if cp0 > new_tid_int:
+ if new_checkpoints[0] > new_tid_int:
# checkpoint0 is in a future that this instance can't
# yet see. Ignore the checkpoint change for now.
new_checkpoints = self.checkpoints
- cp0, cp1 = new_checkpoints
allow_shift = False
if (new_checkpoints == self.checkpoints
@@ -407,6 +406,7 @@
m[oid_int] = tid_int
self.current_tid = new_tid_int
else:
+ cp0, cp1 = new_checkpoints
log.debug("Using new checkpoints: %d %d", cp0, cp1)
# Use the checkpoints specified by the cache.
# Rebuild delta_after0 and delta_after1.
@@ -517,7 +517,7 @@
self._lock = threading.Lock()
self._lock_acquire = self._lock.acquire
self._lock_release = self._lock.release
- self._bucket_limit = 1000000 * options.cache_local_mb / 2
+ self._bucket_limit = int(1000000 * options.cache_local_mb / 2)
self._value_limit = self._bucket_limit / 10
self._bucket0 = LocalClientBucket(self._bucket_limit)
self._bucket1 = LocalClientBucket(self._bucket_limit)
@@ -567,21 +567,28 @@
try:
self._bucket0[key] = value
except SizeOverflow:
- # shift bucket0 to bucket1
+ # Shift bucket0 to bucket1.
self._bucket1 = self._bucket0
self._bucket0 = LocalClientBucket(self._bucket_limit)
- self._bucket0[key] = value
- # Watch for this log message to decide whether the
+ # Watch for the log message below to decide whether the
# cache_local_mb parameter is set to a reasonable value.
# The log message indicates that old cache data has
# been garbage collected.
log.debug("LocalClient buckets shifted")
+ try:
+ self._bucket0[key] = value
+ except SizeOverflow:
+ # The value doesn't fit in the cache at all, apparently.
+ pass
+
def set(self, key, value):
self.set_multi({key: value})
def set_multi(self, d, allow_replace=True):
- res = {}
+ if not self._bucket_limit:
+ # don't bother
+ return
self._lock_acquire()
try:
for key, value in d.iteritems():
@@ -601,16 +608,16 @@
del self._bucket1[key]
self._set_one(key, value)
- res[key] = value
-
finally:
self._lock_release()
- return res
def add(self, key, value):
self.set_multi({key: value}, allow_replace=False)
def incr(self, key):
+ if not self._bucket_limit:
+ # don't bother
+ return None
self._lock_acquire()
try:
value = self._bucket0.get(key)
Modified: relstorage/trunk/relstorage/tests/test_cache.py
===================================================================
--- relstorage/trunk/relstorage/tests/test_cache.py 2009-10-20 20:13:59 UTC (rev 105179)
+++ relstorage/trunk/relstorage/tests/test_cache.py 2009-10-20 20:49:17 UTC (rev 105180)
@@ -388,6 +388,17 @@
self.assertEqual(c.get('abc'), 'def')
self.assertEqual(c.get('xyz'), None)
+ def test_set_with_zero_space(self):
+ options = MockOptions()
+ options.cache_local_mb = 0
+ c = self.getClass()(options)
+ self.assertEqual(c._bucket_limit, 0)
+ self.assertEqual(c._value_limit, 0)
+ c.set('abc', 1)
+ c.set('def', '')
+ self.assertEqual(c.get('abc'), None)
+ self.assertEqual(c.get('def'), None)
+
def test_set_multi_and_get_multi(self):
c = self._makeOne()
c.set_multi({'k0': 'abc', 'k1': 'def'})
@@ -463,7 +474,13 @@
self.assertEqual(c._bucket0.size, 2)
self.assertEqual(c._bucket1.size, 4)
+ def test_incr_with_zero_space(self):
+ options = MockOptions()
+ options.cache_local_mb = 0
+ c = self.getClass()(options)
+ self.assertEqual(c.incr('abc'), None)
+
class MockOptions:
cache_module_name = ''
cache_servers = ''
More information about the checkins mailing list.