[Checkins] SVN: zope.ramcache/trunk/ Added the actual code with much less dependencies :)
Hanno Schlichting
hannosch at hannosch.eu
Thu Jul 23 05:54:44 EDT 2009
Log message for revision 102128:
Added the actual code with much less dependencies :)
Changed:
U zope.ramcache/trunk/bootstrap.py
U zope.ramcache/trunk/setup.py
A zope.ramcache/trunk/src/zope/ramcache/interfaces/
A zope.ramcache/trunk/src/zope/ramcache/interfaces/__init__.py
A zope.ramcache/trunk/src/zope/ramcache/interfaces/ram.py
A zope.ramcache/trunk/src/zope/ramcache/ram.py
A zope.ramcache/trunk/src/zope/ramcache/tests/
A zope.ramcache/trunk/src/zope/ramcache/tests/__init__.py
A zope.ramcache/trunk/src/zope/ramcache/tests/test_icache.py
A zope.ramcache/trunk/src/zope/ramcache/tests/test_ramcache.py
-=-
Modified: zope.ramcache/trunk/bootstrap.py
===================================================================
--- zope.ramcache/trunk/bootstrap.py 2009-07-23 09:24:08 UTC (rev 102127)
+++ zope.ramcache/trunk/bootstrap.py 2009-07-23 09:54:44 UTC (rev 102128)
@@ -1,6 +1,6 @@
##############################################################################
#
-# Copyright (c) 2006 Zope Corporation and Contributors.
+# Copyright (c) 2006-2009 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
@@ -16,8 +16,6 @@
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
-
-$Id: bootstrap.py 73626 2007-03-26 13:08:47Z baijum $
"""
import os, shutil, sys, tempfile, urllib2
Modified: zope.ramcache/trunk/setup.py
===================================================================
--- zope.ramcache/trunk/setup.py 2009-07-23 09:24:08 UTC (rev 102127)
+++ zope.ramcache/trunk/setup.py 2009-07-23 09:54:44 UTC (rev 102128)
@@ -1,6 +1,6 @@
##############################################################################
#
-# Copyright (c) 2006 Zope Corporation and Contributors.
+# Copyright (c) 2006-2009 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
@@ -12,8 +12,6 @@
#
##############################################################################
"""Setup for zope.ramcache package
-
-$Id$
"""
import os
from setuptools import setup, find_packages
@@ -49,6 +47,10 @@
namespace_packages=['zope'],
install_requires = [
'setuptools',
+ 'zope.interface',
+ 'zope.location',
+ 'zope.testing',
+ 'ZODB3',
],
include_package_data = True,
zip_safe = False,
Copied: zope.ramcache/trunk/src/zope/ramcache/interfaces/__init__.py (from rev 102127, zope.app.cache/trunk/src/zope/app/cache/interfaces/__init__.py)
===================================================================
--- zope.ramcache/trunk/src/zope/ramcache/interfaces/__init__.py (rev 0)
+++ zope.ramcache/trunk/src/zope/ramcache/interfaces/__init__.py 2009-07-23 09:54:44 UTC (rev 102128)
@@ -0,0 +1,44 @@
+##############################################################################
+#
+# Copyright (c) 2002-2009 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Interfaces for cache.
+"""
+__docformat__ = 'restructuredtext'
+
+from zope.interface import Interface
+
+
class ICache(Interface):
    """Interface for caches."""

    def invalidate(ob, key=None):
        """Invalidates cached entries that apply to the given object.

        `ob` is an object location. If `key` is specified, only
        invalidates entry for the given key. Otherwise invalidates
        all entries for the object.
        """

    def invalidateAll():
        """Invalidates all cached entries."""

    def query(ob, key=None, default=None):
        """Returns the cached data previously stored by `set()`.

        `ob` is the location of the content object being cached. `key` is
        a mapping of keywords and values which should all be used to
        select a cache entry.  `default` is returned when no matching
        entry is found.
        """

    def set(data, ob, key=None):
        """Stores the result of executing an operation.

        `data` is the value to cache, `ob` the object location it
        belongs to, and `key` an optional mapping identifying the
        particular entry (same meaning as in `query`).
        """
Copied: zope.ramcache/trunk/src/zope/ramcache/interfaces/ram.py (from rev 102127, zope.app.cache/trunk/src/zope/app/cache/interfaces/ram.py)
===================================================================
--- zope.ramcache/trunk/src/zope/ramcache/interfaces/ram.py (rev 0)
+++ zope.ramcache/trunk/src/zope/ramcache/interfaces/ram.py 2009-07-23 09:54:44 UTC (rev 102128)
@@ -0,0 +1,43 @@
+##############################################################################
+#
+# Copyright (c) 2002-2009 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""RAM cache interface.
+"""
+__docformat__ = 'restructuredtext'
+
+from zope.interface import Attribute
+
+from zope.ramcache.interfaces import ICache
+
+
class IRAMCache(ICache):
    """Interface for the RAM Cache."""

    maxEntries = Attribute("""A maximum number of cached values.""")

    maxAge = Attribute("""Maximum age for cached values in seconds.""")

    cleanupInterval = Attribute("""An interval between cache cleanups
    in seconds.""")

    def getStatistics():
        """Reports on the contents of a cache.

        The returned value is a sequence of dictionaries with the
        following keys:

        `path`, `hits`, `misses`, `size`, `entries`
        """

    def update(maxEntries, maxAge, cleanupInterval):
        """Saves the parameters available to the user.

        ``None`` values leave the corresponding setting unchanged.
        """
Copied: zope.ramcache/trunk/src/zope/ramcache/ram.py (from rev 102127, zope.app.cache/trunk/src/zope/app/cache/ram.py)
===================================================================
--- zope.ramcache/trunk/src/zope/ramcache/ram.py (rev 0)
+++ zope.ramcache/trunk/src/zope/ramcache/ram.py 2009-07-23 09:54:44 UTC (rev 102128)
@@ -0,0 +1,330 @@
+##############################################################################
+#
+# Copyright (c) 2002-2009 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""RAM cache implementation.
+"""
+__docformat__ = 'restructuredtext'
+
+from time import time
+from threading import Lock
+from cPickle import dumps
+
+from persistent import Persistent
+from zope.interface import implements
+from zope.location.interfaces import IContained
+
+from zope.ramcache.interfaces.ram import IRAMCache
+
+
# A global caches dictionary shared between threads; maps a RAMCache's
# unique ``_cacheId`` to its volatile Storage object.
caches = {}

# A writelock guarding modifications of the `caches` dictionary.
writelock = Lock()

# A counter for cache ids and the lock serializing its increments.
cache_id_counter = 0
cache_id_writelock = Lock()
+
+
class RAMCache(Persistent):
    """The design of this class is heavily based on RAMCacheManager in Zope2.

    The idea behind the `RAMCache` is that it should be shared between threads,
    so that the same objects are not cached in each thread. This is achieved by
    storing the cache data structure itself as a module level variable
    (`RAMCache.caches`). This, of course, requires locking on modifications of
    that data structure.

    `RAMCache` is a persistent object. The actual data storage is a volatile
    object, which can be acquired/created by calling ``_getStorage()``. Storage
    objects are shared between threads and handle their blocking internally.
    """

    implements(IRAMCache, IContained)

    # IContained location attributes, filled in by the container.
    __parent__ = __name__ = None

    def __init__(self):
        # A timestamp and a counter are used here because using just a
        # timestamp and an id (address) produced unit test failures on
        # Windows (where ticks are 55ms long). If we want to use just
        # the counter, we need to make it persistent, because the
        # RAMCaches are persistent.

        cache_id_writelock.acquire()
        try:
            global cache_id_counter
            cache_id_counter += 1
            # This id is the key into the module-level `caches` dict.
            self._cacheId = "%s_%f_%d" % (id(self), time(), cache_id_counter)
        finally:
            cache_id_writelock.release()

        self.requestVars = ()
        self.maxEntries = 1000       # entries kept before pruning kicks in
        self.maxAge = 3600           # seconds before an entry is stale
        self.cleanupInterval = 300   # seconds between cleanup passes

    def getStatistics(self):
        """See `IRAMCache`: delegates to the shared storage."""
        s = self._getStorage()
        return s.getStatistics()

    def update(self, maxEntries=None, maxAge=None, cleanupInterval=None):
        """See `IRAMCache`: ``None`` leaves a setting unchanged."""
        if maxEntries is not None:
            self.maxEntries = maxEntries

        if maxAge is not None:
            self.maxAge = maxAge

        if cleanupInterval is not None:
            self.cleanupInterval = cleanupInterval

        # Propagate to the shared storage; it also ignores None values.
        self._getStorage().update(maxEntries, maxAge, cleanupInterval)

    def invalidate(self, ob, key=None):
        """See `ICache`.

        NOTE(review): an *empty* mapping is treated like ``None`` here
        (truth test ``if key:``) and invalidates every entry for `ob`,
        while `query`/`set` normalize an empty mapping to the key ``()``
        -- confirm this asymmetry is intended.
        """
        s = self._getStorage()
        if key:
            key = self._buildKey(key)
            s.invalidate(ob, key)
        else:
            s.invalidate(ob)

    def invalidateAll(self):
        """See `ICache`: drops every entry in this cache's storage."""
        s = self._getStorage()
        s.invalidateAll()

    def query(self, ob, key=None, default=None):
        """See `ICache`: returns `default` on a miss."""
        s = self._getStorage()
        key = self._buildKey(key)
        try:
            return s.getEntry(ob, key)
        except KeyError:
            return default

    def set(self, data, ob, key=None):
        """See `ICache`: stores `data` under the normalized key."""
        s = self._getStorage()
        key = self._buildKey(key)
        s.setEntry(ob, key, data)

    def _getStorage(self):
        """Finds or creates a storage object.

        The storage lives in the module-level `caches` dict, keyed by
        ``_cacheId``, so it survives ghosting of this persistent object
        and is shared by all threads.
        """
        cacheId = self._cacheId
        writelock.acquire()
        try:
            if cacheId not in caches:
                caches[cacheId] = Storage(self.maxEntries, self.maxAge,
                                          self.cleanupInterval)
            return caches[cacheId]
        finally:
            writelock.release()

    def _buildKey(kw):
        """Build a tuple which can be used as an index for a cached value.

        Sorting the items normalizes equal mappings to the same key; a
        false-y mapping yields the empty tuple.
        """
        if kw:
            items = kw.items()
            items.sort()
            return tuple(items)
        return ()

    # Pre-decorator-syntax staticmethod registration.
    _buildKey = staticmethod(_buildKey)
+
+
class Storage(object):
    """Storage keeps the count and does the aging and cleanup of cached
    entries.

    This object is shared between threads. It corresponds to a single
    persistent `RAMCache` object. Storage does the locking necessary
    for thread safety.

    Entries are kept as ``_data[ob][key] = [value, ctime, access_count]``.
    """

    def __init__(self, maxEntries=1000, maxAge=3600, cleanupInterval=300):
        # {ob -> {key -> [value, ctime, access_count]}}
        self._data = {}
        # {ob -> number of failed lookups}
        self._misses = {}
        # Invalidations requested while the writelock was held elsewhere;
        # drained by _invalidate_queued() after each lock release.
        self._invalidate_queue = []
        self.maxEntries = maxEntries
        self.maxAge = maxAge
        self.cleanupInterval = cleanupInterval
        self.writelock = Lock()
        self.lastCleanup = time()

    def update(self, maxEntries=None, maxAge=None, cleanupInterval=None):
        """Set the registration options. ``None`` values are ignored."""
        if maxEntries is not None:
            self.maxEntries = maxEntries

        if maxAge is not None:
            self.maxAge = maxAge

        if cleanupInterval is not None:
            self.cleanupInterval = cleanupInterval

    def getEntry(self, ob, key):
        """Return the cached value or raise `KeyError` on a miss.

        A periodic cleanup pass is piggy-backed on lookups.  The miss
        and access counters are updated without taking the writelock;
        races there only skew statistics.
        """
        if self.lastCleanup <= time() - self.cleanupInterval:
            self.cleanup()

        try:
            data = self._data[ob][key]
        except KeyError:
            if ob not in self._misses:
                self._misses[ob] = 0
            self._misses[ob] += 1
            raise
        else:
            data[2] += 1  # increment access count
            return data[0]

    def setEntry(self, ob, key, value):
        """Stores a value for the object. Creates the necessary
        dictionaries."""

        # Opportunistic cleanup, as in getEntry().
        if self.lastCleanup <= time() - self.cleanupInterval:
            self.cleanup()

        self.writelock.acquire()
        try:
            if ob not in self._data:
                self._data[ob] = {}

            timestamp = time()
            # [data, ctime, access count]
            self._data[ob][key] = [value, timestamp, 0]
        finally:
            self.writelock.release()
        # Drain deferred invalidations now that the lock is free.
        self._invalidate_queued()

    def _do_invalidate(self, ob, key=None):
        """This does the actual invalidation, but does not handle the locking.

        This method is supposed to be called from `invalidate`
        """
        try:
            if key is None:
                del self._data[ob]
                # Reset (and possibly create) the miss counter for ob.
                self._misses[ob] = 0
            else:
                del self._data[ob][key]
                if not self._data[ob]:
                    del self._data[ob]
        except KeyError:
            # Already gone -- invalidation is idempotent.
            pass

    def _invalidate_queued(self):
        """This method should be called after each writelock release."""

        while self._invalidate_queue:
            obj, key = self._invalidate_queue.pop()
            self.invalidate(obj, key)

    def invalidate(self, ob, key=None):
        """Drop the cached values.

        Drop all the values for an object if no key is provided or
        just one entry if the key is provided.

        Uses a non-blocking acquire: if another thread holds the lock,
        the request is queued and performed after that thread releases.
        """
        if self.writelock.acquire(0):
            try:
                self._do_invalidate(ob, key)
            finally:
                self.writelock.release()
        else:
            self._invalidate_queue.append((ob, key))

    def invalidateAll(self):
        """Drop all the cached values.
        """
        self.writelock.acquire()
        try:
            self._data = {}
            self._misses = {}
            self._invalidate_queue = []
        finally:
            self.writelock.release()

    def removeStaleEntries(self):
        """Remove the entries older than `maxAge`"""

        # maxAge <= 0 disables age-based expiry.
        if self.maxAge > 0:
            punchline = time() - self.maxAge
            self.writelock.acquire()
            try:
                data = self._data
                for object, dict in data.items():
                    for key in dict.keys():
                        if dict[key][1] < punchline:
                            del dict[key]
                            if not dict:
                                del data[object]
            finally:
                self.writelock.release()
            self._invalidate_queued()

    def cleanup(self):
        """Cleanup the data"""
        self.removeStaleEntries()
        self.removeLeastAccessed()
        self.lastCleanup = time()

    def removeLeastAccessed(self):
        """Prune entries when the total exceeds `maxEntries`.

        Drops every entry whose access count is <= the count of the
        (maxEntries+1)-th least accessed entry, so the result may end up
        below `maxEntries`; all remaining counters are then reset.
        """
        self.writelock.acquire()
        try:
            data = self._data
            keys = [(ob, k) for ob, v in data.iteritems() for k in v]

            if len(keys) > self.maxEntries:
                def getKey(item):
                    ob, key = item
                    return data[ob][key][2]
                keys.sort(key=getKey)

                ob, key = keys[self.maxEntries]
                maxDropCount = data[ob][key][2]

                keys.reverse()

                for ob, key in keys:
                    if data[ob][key][2] <= maxDropCount:
                        del data[ob][key]
                        if not data[ob]:
                            del data[ob]

                self._clearAccessCounters()
        finally:
            self.writelock.release()
        self._invalidate_queued()

    def _clearAccessCounters(self):
        # Reset per-entry access counts and per-object miss counts.
        for dict in self._data.itervalues():
            for val in dict.itervalues():
                val[2] = 0
        for k in self._misses:
            self._misses[k] = 0

    def getKeys(self, object):
        """Return the list of keys cached for `object`."""
        return self._data[object].keys()

    def getStatistics(self):
        """Return a tuple of per-object stat dicts, sorted by object.

        Each dict has the keys `path`, `hits`, `misses`, `size`
        (pickled size of the object's entries) and `entries`.
        """
        objects = self._data.keys()
        objects.sort()
        result = []

        for ob in objects:
            size = len(dumps(self._data[ob]))
            hits = sum(entry[2] for entry in self._data[ob].itervalues())
            result.append({'path': ob,
                           'hits': hits,
                           'misses': self._misses.get(ob, 0),
                           'size': size,
                           'entries': len(self._data[ob])})
        return tuple(result)
Added: zope.ramcache/trunk/src/zope/ramcache/tests/__init__.py
===================================================================
--- zope.ramcache/trunk/src/zope/ramcache/tests/__init__.py (rev 0)
+++ zope.ramcache/trunk/src/zope/ramcache/tests/__init__.py 2009-07-23 09:54:44 UTC (rev 102128)
@@ -0,0 +1 @@
+#
Property changes on: zope.ramcache/trunk/src/zope/ramcache/tests/__init__.py
___________________________________________________________________
Added: svn:eol-style
+ native
Copied: zope.ramcache/trunk/src/zope/ramcache/tests/test_icache.py (from rev 102127, zope.app.cache/trunk/src/zope/app/cache/tests/test_icache.py)
===================================================================
--- zope.ramcache/trunk/src/zope/ramcache/tests/test_icache.py (rev 0)
+++ zope.ramcache/trunk/src/zope/ramcache/tests/test_icache.py 2009-07-23 09:54:44 UTC (rev 102128)
@@ -0,0 +1,74 @@
+##############################################################################
+#
+# Copyright (c) 2001-2009 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Unit tests for ICache interface
+"""
+from unittest import TestSuite, main
+from zope.interface.verify import verifyObject
+
+from zope.ramcache.interfaces import ICache
+
+
class BaseICacheTest(object):
    """Base class for ICache unit tests. Subclasses should provide a
    _Test__new() method that returns a new empty cache object.

    (``_Test__new`` is the already name-mangled spelling used by the
    calls below; define it literally under that name in the subclass.)
    """

    def testVerifyICache(self):
        # Verify that the object implements ICache
        verifyObject(ICache, self._Test__new())

    def testCaching(self):
        # Verify basic caching
        cache = self._Test__new()
        ob = "obj"
        data = "data"
        # Unique sentinel: an identity check distinguishes a miss from
        # any legitimately cached value.
        marker = []
        self.failIf(cache.query(ob, None, default=marker) is not marker,
                    "empty cache should not contain anything")

        cache.set(data, ob, key={'id': 35})
        self.assertEquals(cache.query(ob, {'id': 35}), data,
                          "should return cached result")
        self.failIf(cache.query(ob, {'id': 33}, default=marker) is not marker,
                    "should not return cached result for a different key")

        cache.invalidate(ob, {"id": 33})
        self.assertEquals(cache.query(ob, {'id': 35}), data,
                          "should return cached result")
        self.failIf(cache.query(ob, {'id': 33}, default=marker) is not marker,
                    "should not return cached result after invalidate")

    def testInvalidateAll(self):
        cache = self._Test__new()
        ob1 = object()
        ob2 = object()
        cache.set("data1", ob1)
        cache.set("data2", ob2, key={'foo': 1})
        cache.set("data3", ob2, key={'foo': 2})
        cache.invalidateAll()
        marker = []
        self.failIf(cache.query(ob1, default=marker) is not marker,
                    "should not return cached result after invalidateAll")
        self.failIf(cache.query(ob2, {'foo': 1}, default=marker) is not marker,
                    "should not return cached result after invalidateAll")
        self.failIf(cache.query(ob2, {'foo': 2}, default=marker) is not marker,
                    "should not return cached result after invalidateAll")
+
+
def test_suite():
    """Return an empty suite.

    BaseICacheTest is only a mixin; the concrete suites that exercise
    it live in test_ramcache.
    """
    suite = TestSuite(())
    return suite


if __name__ == '__main__':
    main(defaultTest='test_suite')
Copied: zope.ramcache/trunk/src/zope/ramcache/tests/test_ramcache.py (from rev 102127, zope.app.cache/trunk/src/zope/app/cache/tests/test_ramcache.py)
===================================================================
--- zope.ramcache/trunk/src/zope/ramcache/tests/test_ramcache.py (rev 0)
+++ zope.ramcache/trunk/src/zope/ramcache/tests/test_ramcache.py 2009-07-23 09:54:44 UTC (rev 102128)
@@ -0,0 +1,512 @@
+##############################################################################
+#
+# Copyright (c) 2001-2009 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Unit tests for RAM Cache.
+"""
+from time import time
+from unittest import TestCase, TestSuite, main, makeSuite
+
+from zope.interface.verify import verifyClass, verifyObject
+from zope.testing.cleanup import CleanUp
+
+from zope.ramcache.ram import RAMCache
+from zope.ramcache.ram import Storage
+from zope.ramcache.tests.test_icache import BaseICacheTest
+from zope.ramcache.interfaces import ICache
+from zope.ramcache.interfaces.ram import IRAMCache
+
+
class TestRAMCache(CleanUp, TestCase, BaseICacheTest):
    """Tests for RAMCache, plus the generic ICache tests via the mixin."""

    def _Test__new(self):
        # Factory hook required by BaseICacheTest.
        return RAMCache()

    def test_interface(self):
        verifyObject(IRAMCache, RAMCache())
        verifyClass(ICache, RAMCache)

    def test_init(self):
        c1 = RAMCache()._cacheId
        c2 = RAMCache()._cacheId
        self.assertNotEquals(c1, c2, "The cacheId is not unique")

    def test_getStatistics(self):
        # getStatistics() must just delegate to the storage.
        c = RAMCache()
        c.set(42, "object", key={'foo': 'bar'})
        c.set(43, "object", key={'foo': 'bar'})
        c.query("object")
        c.query("object", key={'foo': 'bar'})
        r1 = c._getStorage().getStatistics()
        r2 = c.getStatistics()
        self.assertEqual(r1, r2, "see Storage.getStatistics() tests")

    def test_update(self):
        # Parameters set on the cache must propagate to the storage.
        c = RAMCache()
        c.update(1, 2, 3)
        s = c._getStorage()
        self.assertEqual(s.maxEntries, 1, "maxEntries not set")
        self.assertEqual(s.maxAge, 2, "maxAge not set")
        self.assertEqual(s.cleanupInterval, 3, "cleanupInterval not set")

    def test_timedCleanup(self):
        from time import sleep
        c = RAMCache()
        c.update(cleanupInterval=1, maxAge=2)
        lastCleanup = c._getStorage().lastCleanup
        sleep(2)
        c.set(42, "object", key={'foo': 'bar'})
        # last cleanup should now be updated
        self.failUnless(lastCleanup < c._getStorage().lastCleanup)

    def test_cache(self):
        from zope.ramcache import ram
        self.assertEqual(type(ram.caches), type({}),
                         'no module level cache dictionary')

    def test_getStorage(self):
        c = RAMCache()
        c.maxAge = 123
        c.maxEntries = 2002
        c.cleanupInterval = 42
        storage1 = c._getStorage()
        storage2 = c._getStorage()
        self.assertEqual(storage1, storage2,
                         "_getStorage returns different storages")

        self.assertEqual(storage1.maxAge, 123,
                         "maxAge not set (expected 123, got %s)"
                         % storage1.maxAge)
        self.assertEqual(storage1.maxEntries, 2002,
                         "maxEntries not set (expected 2002, got %s)"
                         % storage1.maxEntries)
        self.assertEqual(storage1.cleanupInterval, 42,
                         "cleanupInterval not set (expected 42, got %s)"
                         % storage1.cleanupInterval)

        # Simulate persisting and restoring the RamCache which removes
        # all _v_ attributes.
        for k in c.__dict__.keys():
            if k.startswith('_v_'):
                del c.__dict__[k]
        storage2 = c._getStorage()
        self.assertEqual(storage1, storage2,
                         "_getStorage returns different storages")

    def test_buildKey(self):
        # Keys are item tuples sorted by keyword name.
        kw = {'foo': 1, 'bar': 2, 'baz': 3}
        key = RAMCache._buildKey(kw)
        self.assertEqual(key, (('bar',2), ('baz',3), ('foo',1)), "wrong key")

    def test_query(self):
        ob = ('aaa',)

        keywords = {"answer": 42}
        value = "true"
        c = RAMCache()
        key = RAMCache._buildKey(keywords)
        c._getStorage().setEntry(ob, key, value)

        self.assertEqual(c.query(ob, keywords), value,
                         "incorrect value")

        self.assertEqual(c.query(ob, None), None, "defaults incorrect")
        self.assertEqual(c.query(ob, {"answer": 2}, default="bummer"),
                         "bummer", "default doesn't work")

    def test_set(self):
        ob = ('path',)
        keywords = {"answer": 42}
        value = "true"
        c = RAMCache()
        c.requestVars = ('foo', 'bar')
        key = RAMCache._buildKey(keywords)

        c.set(value, ob, keywords)
        self.assertEqual(c._getStorage().getEntry(ob, key), value,
                         "Not stored correctly")

    def test_invalidate(self):
        ob1 = ("loc1",)
        ob2 = ("loc2",)
        keywords = {"answer": 42}
        keywords2 = {"answer": 41}
        value = "true"
        c = RAMCache()
        key1 = RAMCache._buildKey(keywords)
        key2 = RAMCache._buildKey(keywords)
        key3 = RAMCache._buildKey(keywords2)

        # Test invalidating entries with a keyword
        c._getStorage().setEntry(ob1, key1, value)
        c._getStorage().setEntry(ob2, key2, value)
        c._getStorage().setEntry(ob2, key3, value)

        c.invalidate(ob2, keywords)

        c._getStorage().getEntry(ob1, key1)
        self.assertRaises(KeyError, c._getStorage().getEntry, ob2, key2)
        c._getStorage().getEntry(ob2, key3)

        # Test deleting the whole object
        c._getStorage().setEntry(ob1, key1, value)
        c._getStorage().setEntry(ob2, key2, value)
        c._getStorage().setEntry(ob2, key3, value)

        c.invalidate(ob2)
        self.assertRaises(KeyError, c._getStorage().getEntry, ob2, key2)
        self.assertRaises(KeyError, c._getStorage().getEntry, ob2, key3)
        c._getStorage().getEntry(ob1, key1)

        # Try something that's not there
        c.invalidate(('yadda',))
+
+
class TestStorage(TestCase):
    """White-box tests for Storage: these poke at `_data`/`_misses`
    directly, relying on the [value, ctime, access_count] entry layout.
    """

    def test_getEntry(self):
        s = Storage()
        object = 'object'
        key = ('view', (), ('answer', 42))
        value = 'yes'
        timestamp = time()

        s._data = {object: {key: [value, timestamp, 1]}}
        self.assertEqual(s.getEntry(object, key), value, 'got wrong value')

        self.assert_(s._data[object][key][2] == 2, 'access count not updated')

        # See if _misses are updated
        try:
            s.getEntry(object, "Nonexistent")
        except KeyError:
            pass
        else:
            # NOTE(review): message should probably read "Expected KeyError"
            raise Exception("ExpectedKeyError")

        self.assertEqual(s._misses[object], 1)

        object2 = "second"
        self.assert_(not s._misses.has_key(object2))
        try:
            s.getEntry(object2, "Nonexistent")
        except KeyError:
            pass
        else:
            raise Exception("ExpectedKeyError")
        self.assertEqual(s._misses[object2], 1)

    def test_getEntry_do_cleanup(self):
        # A lookup past the cleanup interval must expire stale entries.
        s = Storage(cleanupInterval=300, maxAge=300)
        object = 'object'
        key = ('view', (), ('answer', 42))
        value = 'yes'

        s.setEntry(object, key, value)

        s._data[object][key][1] = time() - 400
        s.lastCleanup = time() - 400

        self.assertRaises(KeyError, s.getEntry, object, key)

    def test_setEntry(self):
        s = Storage(cleanupInterval=300, maxAge=300)
        object = 'object'
        key = ('view', (), ('answer', 42))
        key2 = ('view2', (), ('answer', 42))
        value = 'yes'

        t1 = time()
        s.setEntry(object, key, value)
        t2 = time()

        timestamp = s._data[object][key][1]
        self.failUnless(t1 <= timestamp <= t2, 'wrong timestamp')

        self.assertEqual(s._data, {object: {key: [value, timestamp, 0]}},
                         'stored data incorrectly')

        # Age the first entry past maxAge; the next setEntry should
        # trigger a cleanup that removes it.
        s._data[object][key][1] = time() - 400
        s.lastCleanup = time() - 400

        s.setEntry(object, key2, value)

        timestamp = s._data[object][key2][1]
        self.assertEqual(s._data, {object: {key2: [value, timestamp, 0]}},
                         'cleanup not called')

    def test_set_get(self):
        s = Storage()
        object = 'object'
        key = ('view', (), ('answer', 42))
        value = 'yes'
        s.setEntry(object, key, value)
        self.assertEqual(s.getEntry(object, key), value,
                         'got something other than set')

    def test_do_invalidate(self):
        s = Storage()
        object = 'object'
        object2 = 'object2'
        key = ('view', (), ('answer', 41))
        key2 = ('view2', (), ('answer', 42))
        value = 'yes'
        ts = time()
        s._data = {object: {key: [value, ts, 0],
                            key2: [value, ts, 0]},
                   object2: {key: [value, ts, 0]}}
        s._misses[object] = 42
        s._do_invalidate(object)
        self.assertEqual(s._data, {object2: {key: [value, ts, 0]}},
                         'invalidation failed')
        self.assertEqual(s._misses[object], 0, "misses counter not cleared")

        s._data = {object: {key: [value, ts, 0],
                            key2: [value, ts, 0]},
                   object2: {key: [value, ts, 0]}}
        s._do_invalidate(object, key2)
        self.assertEqual(s._data,
                         {object: {key: [value, ts, 0]},
                          object2: {key: [value, ts, 0]}},
                         'invalidation of one key failed')

    def test_invalidate(self):
        s = Storage()
        object = 'object'
        object2 = 'object2'
        key = ('view', (), ('answer', 41))
        key2 = ('view2', (), ('answer', 42))
        value = 'yes'
        ts = time()
        s._data = {object: {key: [value, ts, 0],
                            key2: [value, ts, 0]},
                   object2: {key: [value, ts, 0]}}

        # While the lock is held elsewhere, invalidate must queue.
        s.writelock.acquire()
        try:
            s.invalidate(object)
        finally:
            s.writelock.release()
        self.assertEqual(s._invalidate_queue, [(object, None)],
                         "nothing in the invalidation queue")

        # With the lock free, invalidate acts immediately.
        s._data = {object: {key: [value, ts, 0],
                            key2: [value, ts, 0]},
                   object2: {key: [value, ts, 0]}}
        s.invalidate(object)
        self.assertEqual(s._data, {object2: {key: [value, ts, 0]}},
                         "not invalidated")

    def test_invalidate_queued(self):
        s = Storage()
        object = 'object'
        object2 = 'object2'
        object3 = 'object3'
        key = ('view', (), ('answer', 41))
        key2 = ('view2', (), ('answer', 42))
        value = 'yes'
        ts = time()
        s._data = {object: {key: [value, ts, 0],
                            key2: [value, ts, 0]},
                   object2: {key: [value, ts, 0]},
                   object3: "foo" }
        s._invalidate_queue = [(object2, None), (object3, None)]
        s._invalidate_queued()
        self.assertEqual(s._data,
                         {object: {key: [value, ts, 0], key2: [value, ts, 0]}},
                         "failed to invalidate queued")

    def test_invalidateAll(self):
        s = Storage()
        object = 'object'
        object2 = 'object2'
        key = ('view', (), ('answer', 41))
        key2 = ('view2', (), ('answer', 42))
        value = 'yes'
        ts = time()
        s._data = {object: {key: [value, ts, 0],
                            key2: [value, ts, 0]},
                   object2: {key: [value, ts, 0]}}
        s._invalidate_queue = [(object, None)]
        s._misses = {object: 10, object2: 100}
        s.invalidateAll()
        self.assertEqual(s._data, {}, "not invalidated")
        self.assertEqual(s._misses, {}, "miss counters not reset")
        self.assertEqual(s._invalidate_queue, [], "invalidate queue not empty")

    def test_getKeys(self):
        s = Storage()
        object = 'object'
        object2 = 'object2'
        key = ('view', (), ('answer', 41))
        key2 = ('view2', (), ('answer', 42))
        value = 'yes'
        ts = time()
        s._data = {object: {key: [value, ts, 0],
                            key2: [value, ts, 0]},
                   object2: {key: [value, ts, 0]}}
        keys = s.getKeys(object)
        expected = [key, key2]
        keys.sort()
        expected.sort()
        self.assertEqual(keys, expected, 'bad keys')

    def test_removeStale(self):
        s = Storage(maxAge=100)
        object = 'object'
        object2 = 'object2'
        key = ('view', (), ('answer', 42))
        value = 'yes'
        timestamp = time()
        s._data = {object: {key: [value, timestamp-101, 2]},
                   object2: {key: [value, timestamp-90, 0]}}
        s.removeStaleEntries()
        self.assertEqual(s._data, {object2: {key: [value, timestamp-90, 0]}},
                         'stale records removed incorrectly')

        # maxAge == 0 disables age-based expiry entirely.
        s = Storage(maxAge=0)
        s._data = {object: {key: [value, timestamp, 2]},
                   object2: {key: [value, timestamp-90, 0]}}
        d = s._data.copy()
        s.removeStaleEntries()
        self.assertEqual(s._data, d, 'records removed when maxAge == 0')

    def test_locking(self):
        s = Storage(maxAge=100)
        s.writelock.acquire()
        try:
            self.assert_(s.writelock.locked(), "locks don't work")
        finally:
            s.writelock.release()

    def test_removeLeastAccessed(self):
        s = Storage(maxEntries=3)
        object = 'object'
        object2 = 'object2'
        key1 = ('view1', (), ('answer', 42))
        key2 = ('view2', (), ('answer', 42))
        key3 = ('view3', (), ('answer', 42))
        value = 'yes'
        timestamp = time()
        s._data = {object: {key1: [value, 1, 10],
                            key2: [value, 6, 5],
                            key3: [value, 2, 2]},
                   object2: {key1: [value, 5, 2],
                             key2: [value, 3, 1],
                             key3: [value, 4, 1]}}
        s.removeLeastAccessed()
        # Everything at or below the cut-off access count is dropped and
        # the surviving counters are reset to zero.
        self.assertEqual(s._data,
                         {object: {key1: [value, 1, 0],
                                   key2: [value, 6, 0]}},
                         'least records removed incorrectly')

        s = Storage(maxEntries=6)
        s._data = {object: {key1: [value, timestamp, 10],
                            key2: [value, timestamp, 5],
                            key3: [value, timestamp, 2]},
                   object2: {key1: [value, timestamp, 2],
                             key2: [value, timestamp, 1],
                             key3: [value, timestamp, 1]}}
        c = s._data.copy()
        s.removeLeastAccessed()
        self.assertEqual(s._data, c, "modified list even though len < max")

    def test__clearAccessCounters(self):
        s = Storage(maxEntries=3)
        object = 'object'
        object2 = 'object2'
        key1 = ('view1', (), ('answer', 42))
        key2 = ('view2', (), ('answer', 42))
        key3 = ('view3', (), ('answer', 42))
        value = 'yes'
        timestamp = time()
        s._data = {object: {key1: [value, 1, 10],
                            key2: [value, 2, 5],
                            key3: [value, 3, 2]},
                   object2: {key1: [value, 4, 2],
                             key2: [value, 5, 1],
                             key3: [value, 6, 1]}}
        s._misses = {object: 4, object2: 2}

        cleared = {object: {key1: [value, 1, 0],
                            key2: [value, 2, 0],
                            key3: [value, 3, 0]},
                   object2: {key1: [value, 4, 0],
                             key2: [value, 5, 0],
                             key3: [value, 6, 0]}}
        clearMisses = {object: 0, object2: 0}

        s._clearAccessCounters()
        self.assertEqual(s._data, cleared, "access counters not cleared")
        self.assertEqual(s._misses, clearMisses, "misses counter not cleared")

    def test_getStatistics(self):
        from cPickle import dumps
        s = Storage(maxEntries=3)
        object = 'object'
        object2 = 'object2'
        key1 = ('view1', (), ('answer', 42))
        key2 = ('view2', (), ('answer', 42))
        key3 = ('view3', (), ('answer', 42))
        value = 'yes'
        timestamp = time()
        s._data = {object: {key1: [value, 1, 10],
                            key2: [value, 2, 5],
                            key3: [value, 3, 2]},
                   object2: {key1: [value, 4, 2],
                             key2: [value, 5, 1],
                             key3: [value, 6, 1]}}
        s._misses = {object: 11, object2: 42}
        len1 = len(dumps(s._data[object]))
        len2 = len(dumps(s._data[object2]))

        expected = ({'path': object,
                     'hits': 17,
                     'misses': 11,
                     'size': len1,
                     'entries': 3
                     },
                    {'path': object2,
                     'hits': 4,
                     'misses': 42,
                     'size': len2,
                     'entries': 3
                     },
                    )

        result = s.getStatistics()
        self.assertEqual(result, expected)
+
+
class TestModule(TestCase):
    """Checks on module-level state of zope.ramcache.ram."""

    def test_locking(self):
        from zope.ramcache.ram import writelock
        # The shared module-level writelock must actually lock.
        writelock.acquire()
        try:
            self.failUnless(writelock.locked(), "locks don't work")
        finally:
            writelock.release()
+
+
# NOTE(review): unused placeholder -- it is not included in
# test_suite() below; consider removing.
class Test(TestCase):
    pass
+
def test_suite():
    """Assemble the suites for the test case classes of this module."""
    cases = (TestRAMCache, TestStorage, TestModule)
    return TestSuite([makeSuite(case) for case in cases])


if __name__ == '__main__':
    main(defaultTest='test_suite')
More information about the Checkins
mailing list