[Checkins] SVN: zope.fssync/trunk/s
Uwe Oestermeier
u.oestermeier at iwm-kmrc.de
Wed Jun 13 11:24:11 EDT 2007
Log message for revision 76667:
Changed:
U zope.fssync/trunk/setup.py
A zope.fssync/trunk/src/zope/fssync/README.txt
A zope.fssync/trunk/src/zope/fssync/caseinsensitivity.txt
D zope.fssync/trunk/src/zope/fssync/command.py
U zope.fssync/trunk/src/zope/fssync/copier.py
U zope.fssync/trunk/src/zope/fssync/fsmerger.py
D zope.fssync/trunk/src/zope/fssync/fssync.py
U zope.fssync/trunk/src/zope/fssync/fsutil.py
A zope.fssync/trunk/src/zope/fssync/interfaces.py
D zope.fssync/trunk/src/zope/fssync/main.py
U zope.fssync/trunk/src/zope/fssync/merger.py
U zope.fssync/trunk/src/zope/fssync/metadata.py
D zope.fssync/trunk/src/zope/fssync/passwd.py
A zope.fssync/trunk/src/zope/fssync/pickle.py
A zope.fssync/trunk/src/zope/fssync/repository.py
D zope.fssync/trunk/src/zope/fssync/server/
U zope.fssync/trunk/src/zope/fssync/snarf.py
A zope.fssync/trunk/src/zope/fssync/synchronizer.py
A zope.fssync/trunk/src/zope/fssync/task.py
U zope.fssync/trunk/src/zope/fssync/tests/mockmetadata.py
D zope.fssync/trunk/src/zope/fssync/tests/test_command.py
A zope.fssync/trunk/src/zope/fssync/tests/test_docs.py
D zope.fssync/trunk/src/zope/fssync/tests/test_network.py
D zope.fssync/trunk/src/zope/fssync/tests/test_passwd.py
A zope.fssync/trunk/src/zope/fssync/tests/test_task.py
-=-
Modified: zope.fssync/trunk/setup.py
===================================================================
--- zope.fssync/trunk/setup.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/setup.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -37,6 +37,7 @@
install_requires=['setuptools',
'zope.interface',
'zope.proxy',
+ 'zope.traversing',
'zope.xmlpickle'],
include_package_data = True,
Added: zope.fssync/trunk/src/zope/fssync/README.txt
===================================================================
--- zope.fssync/trunk/src/zope/fssync/README.txt (rev 0)
+++ zope.fssync/trunk/src/zope/fssync/README.txt 2007-06-13 15:24:10 UTC (rev 76667)
@@ -0,0 +1,828 @@
+==========================
+Filesystem Synchronization
+==========================
+
+This package provides an API for the synchronization of Python objects
+with a serialized filesystem representation. This API does not address
+security issues. (See zope.app.fssync for a protected web-based API).
+This API is Zope and ZODB independent.
+
+The main use cases are
+
+ - data export / import (e.g. moving data from one place to another)
+
+ - content management (e.g. managing a wiki or other collections of
+ documents offline)
+
+The target representation depends on your use case. In the use case of
+data export/import, for instance, it is crucial that all data are
+exported as completely as possible. Since the data need not be read
+by humans in most circumstances a pickle format may be the most
+complete and easy one to use.
+In the use case of content management it may be more important that
+all metadata are readable by humans. In this case another format,
+e.g. RDFa, may be more appropriate.
+
+Main components
+===============
+
+A synchronizer serializes content objects and stores the serialized
+data in a repository in an application specific format. It uses
+deserializers to read the object back into the content space.
+The serialization format must be rich enough to preserve various forms
+of references which should be reestablished on deserialization.
+
+All these components should be replaceable. Applications may use
+different serialization formats with different references for
+different purposes (e.g. backup vs. content management) and different
+target systems (e.g. a zip archive vs. a svn repository).
+
+The main components are:
+
+ - ISyncTasks like Checkout, Check, and Commit which synchronize
+ a content space with a repository. These tasks use serializers to
+ produce serialized data for a repository in an application
+ specific format. They use deserializers to read the data back.
+ The default implementation uses xmlpickle for python objects,
+ data streams for file contents, and special directories for extras
+ and metadata. Alternative implementations may use standard pickle,
+ a human readable format like RDFa, or application specific formats.
+
+ - ISynchronizer: Synchronizers produce serialized pieces of a
+ Python object (the ISerializer part of a synchronizer) and
+ consume serialized data to (re-)create Python objects (the
+ IDeserializer part of a synchronizer).
+
+ - IPickler: An adapter that determines the pickle format.
+
+ - IRepository: represents a target system that can be used
+ to read and write serialized data.
+
+
+Let's take some samples:
+
+ >>> from StringIO import StringIO
+ >>> from zope import interface
+ >>> from zope import component
+ >>> from zope.fssync import interfaces
+ >>> from zope.fssync import task
+ >>> from zope.fssync import synchronizer
+ >>> from zope.fssync import repository
+ >>> from zope.fssync import pickle
+
+ >>> class A(object):
+ ... data = 'data of a'
+ >>> class B(A):
+ ... pass
+ >>> a = A()
+ >>> b = B()
+ >>> b.data = 'data of b'
+ >>> b.extra = 'extra of b'
+ >>> root = dict(a=a, b=b)
+
+
+Persistent References
+=====================
+
+Many applications use more than one system of persistent references.
+Zope, for instance, uses p_oids, int ids, key references,
+traversal paths, dotted names, named utilities, etc.
+
+Other systems might use generic reference systems like global unique
+ids or primary keys together with domain specific references, like
+emails, URI, postal addresses, code numbers, etc.
+All these references are candidates for exportable references as long
+as they can be resolved on import or reimport.
+
+In our example we use simple integer ids:
+
+ >>> class GlobalIds(object):
+ ... ids = dict()
+ ... count = 0
+ ... def getId(self, obj):
+ ... for k, v in self.ids.iteritems():
+ ... if obj == v:
+ ... return k
+ ... def register(self, obj):
+ ... uid = self.getId(obj)
+ ... if uid is not None:
+ ... return uid
+ ... self.count += 1
+ ... self.ids[self.count] = obj
+ ... return self.count
+ ... def resolve(self, uid):
+ ... return self.ids.get(int(uid), None)
+
+ >>> globalIds = GlobalIds()
+ >>> globalIds.register(a)
+ 1
+ >>> globalIds.register(b)
+ 2
+ >>> globalIds.register(root)
+ 3
+
+In our example we use the int ids as a substitute for the default path
+references which are the most common references in Zope.
+
+In our examples we use a SnarfRepository which can easily be examined:
+
+>>> snarf = repository.SnarfRepository(StringIO())
+>>> checkout = task.Checkout(synchronizer.getSynchronizer, snarf)
+
+
+Entry Ids
+=========
+
+Persistent ids are also used in the metadata files of fssync.
+The references are generated by an IEntryId adapter which must
+have a string representation in order to be saveable in a text file.
+Typically these object ids correspond to the persistent pickle ids, but
+this is not necessarily the case.
+
+Since we do not have paths we use our integer ids:
+
+ >>> @component.adapter(interface.Interface)
+ ... @interface.implementer(interfaces.IEntryId)
+ ... def entryId(obj):
+ ... global globalIds
+ ... return globalIds.getId(obj)
+ >>> component.provideAdapter(entryId)
+
+
+Synchronizer
+============
+
+In the use case of data export / import it is crucial that fssync is able
+to serialize "all" object data. Note that it isn't always obvious what data is
+intrinsic to an object. Therefore we must provide special serialization /
+de-serialization tools which take care of writing and reading "all"
+data. An obvious solution would be to use inheriting synchronization
+adapters. But this solution bears a risk. If someone created a subclass
+and forgot to create an adapter, then their data would be serialized
+incompletely.
+
+A better solution is to provide class based adapters for special object
+types and a default serializer which tries to capture the forgotten
+serialization specifications of subclasses. We register the adapter classes
+as named ISynchronizerFactory utilities and use the dotted name
+of the class as lookup key.
+
+The default synchronizer is registered as an unnamed ISynchronizerFactory
+utility. This synchronizer ensures that all data are pickled to the
+target repository.
+
+ >>> component.provideUtility(synchronizer.DefaultSynchronizer,
+ ... provides=interfaces.ISynchronizerFactory)
+
+All special synchronizers are registered for a specific content class and
+not an abstract interface. The class is represented by the dotted class
+name in the factory registration:
+
+ >>> class AFileSynchronizer(synchronizer.Synchronizer):
+ ... interface.implements(interfaces.IFileSynchronizer)
+ ... def dump(self, writeable):
+ ... writeable.write(self.context.data)
+ ... def load(self, readable):
+ ... self.context.data = readable.read()
+
+ >>> component.provideUtility(AFileSynchronizer,
+ ... interfaces.ISynchronizerFactory,
+ ... name=synchronizer.dottedname(A))
+
+The lookup of the utilities by the dotted class name is handled
+by the getSynchronizer function, which first tries to find
+a named utility. The IDefaultSynchronizer utility is used as a fallback:
+
+ >>> synchronizer.getSynchronizer(a)
+ <zope.fssync.doctest.AFileSynchronizer object at ...>
+
+If no named adapter is registered it returns the registered unnamed default
+adapter (as long as the permissions allow this):
+
+ >>> synchronizer.getSynchronizer(b)
+ <zope.fssync.synchronizer.DefaultSynchronizer object at ...>
+
+This default serializer typically uses a pickle format, which is determined
+by the IPickler adapter. Here we use Zope's xmlpickle.
+
+ >>> component.provideAdapter(pickle.XMLPickler)
+ >>> component.provideAdapter(pickle.XMLUnpickler)
+
+For container like objects we must provide an adapter that maps the
+container to a directory. In our example we use the built-in dict class:
+
+ >>> component.provideUtility(synchronizer.DirectorySynchronizer,
+ ... interfaces.ISynchronizerFactory,
+ ... name=synchronizer.dottedname(dict))
+
+
+Now we can export the object to the snarf archive:
+
+ >>> checkout.perform(root, 'test')
+ >>> print snarf.stream.getvalue()
+ 00000213 @@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="test"
+ keytype="__builtin__.str"
+ type="__builtin__.dict"
+ factory="__builtin__.dict"
+ id="3"
+ />
+ </entries>
+ 00000339 test/@@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="a"
+ keytype="__builtin__.str"
+ type="zope.fssync.doctest.A"
+ factory="zope.fssync.doctest.A"
+ id="1"
+ />
+ <entry name="b"
+ keytype="__builtin__.str"
+ type="zope.fssync.doctest.B"
+ id="2"
+ />
+ </entries>
+ 00000009 test/a
+ data of a00000370 test/b
+ <?xml version="1.0" encoding="utf-8" ?>
+ <pickle>
+ <object>
+ <klass>
+ <global name="B" module="zope.fssync.doctest"/>
+ </klass>
+ <attributes>
+ <attribute name="data">
+ <string>data of b</string>
+ </attribute>
+ <attribute name="extra">
+ <string>extra of b</string>
+ </attribute>
+ </attributes>
+ </object>
+ </pickle>
+ <BLANKLINE>
+
+After the registration of the necessary generators we can reimport the serialized
+data from the repository:
+
+ >>> component.provideUtility(synchronizer.FileGenerator(),
+ ... provides=interfaces.IFileGenerator)
+
+ >>> target = {}
+ >>> commit = task.Commit(synchronizer.getSynchronizer, snarf)
+ >>> commit.perform(target, 'root', 'test')
+ >>> sorted(target.keys())
+ ['root']
+ >>> sorted(target['root'].keys())
+ ['a', 'b']
+
+ >>> target['root']['a'].data
+ 'data of a'
+
+ >>> target['root']['b'].extra
+ 'extra of b'
+
+If we want to commit the data back into the original place we must check
+whether the repository is still consistent with the original content.
+We modify the objects in place to see what happens:
+
+ >>> check = task.Check(synchronizer.getSynchronizer, snarf)
+ >>> check.check(root, '', 'test')
+ >>> check.errors()
+ []
+
+ >>> root['a'].data = 'overwritten'
+ >>> root['b'].extra = 'overwritten'
+
+ >>> check = task.Check(synchronizer.getSynchronizer, snarf)
+ >>> check.check(root, '', 'test')
+ >>> check.errors()
+ ['test/a', 'test/b']
+
+ >>> commit.perform(root, '', 'test')
+ >>> sorted(root.keys())
+ ['a', 'b']
+ >>> root['a'].data
+ 'data of a'
+ >>> root['b'].extra
+ 'extra of b'
+
+ >>> del root['a']
+ >>> commit.perform(root, '', 'test')
+ >>> sorted(root.keys())
+ ['a', 'b']
+
+ >>> del root['b']
+ >>> commit.perform(root, '', 'test')
+ >>> sorted(root.keys())
+ ['a', 'b']
+
+ >>> del root['a']
+ >>> del root['b']
+ >>> commit.perform(root, '', 'test')
+ >>> sorted(root.keys())
+ ['a', 'b']
+
+
+Pickling
+========
+
+In many data structures, large, complex objects are composed of smaller objects.
+These objects are typically stored in one of two ways:
+
+ 1. The smaller objects are stored inside the larger object.
+
+ 2. The smaller objects are allocated in their own location,
+ and the larger object stores references to them.
+
+In case 1 the object is self-contained and can be pickled completely. This is the
+default behavior of the fssync pickler:
+
+ >>> pickler = interfaces.IPickler([42])
+ >>> pickler
+ <zope.fssync.pickle.XMLPickler object at ...>
+ >>> print pickler.dumps()
+ <?xml version="1.0" encoding="utf-8" ?>
+ <pickle>
+ <list>
+ <int>42</int>
+ </list>
+ </pickle>
+ <BLANKLINE>
+
+Case 2 is more complex since the pickler has to take persistent
+references into account.
+
+ >>> class Complex(object):
+ ... def __init__(self, part1, part2):
+ ... self.part1 = part1
+ ... self.part2 = part2
+
+Everything here depends on the definition of what we consider to be an intrinsic
+reference. In the examples above we simply considered all objects as intrinsic.
+
+ >>> from zope.fssync import pickle
+ >>> c = root['c'] = Complex(a, b)
+ >>> stream = StringIO()
+ >>> print interfaces.IPickler(c).dumps()
+ <?xml version="1.0" encoding="utf-8" ?>
+ <pickle>
+ <initialized_object>
+ <klass>
+ <global id="o0" name="_reconstructor" module="copy_reg"/>
+ </klass>
+ <arguments>
+ <tuple>
+ <global name="Complex" module="zope.fssync.doctest"/>
+ <global id="o1" name="object" module="__builtin__"/>
+ <none/>
+ </tuple>
+ </arguments>
+ <state>
+ <dictionary>
+ <item key="part1">
+ <object>
+ <klass>
+ <global name="A" module="zope.fssync.doctest"/>
+ </klass>
+ <attributes>
+ <attribute name="data">
+ <string>data of a</string>
+ </attribute>
+ </attributes>
+ </object>
+ </item>
+ <item key="part2">
+ <object>
+ <klass>
+ <global name="B" module="zope.fssync.doctest"/>
+ </klass>
+ <attributes>
+ <attribute name="data">
+ <string>data of b</string>
+ </attribute>
+ <attribute name="extra">
+ <string>overwritten</string>
+ </attribute>
+ </attributes>
+ </object>
+ </item>
+ </dictionary>
+ </state>
+ </initialized_object>
+ </pickle>
+ <BLANKLINE>
+
+In order to use persistent references we must define a
+PersistentIdGenerator for our pickler, which determines whether
+an object should be pickled completely or only by reference:
+
+ >>> class PersistentIdGenerator(object):
+ ... interface.implements(interfaces.IPersistentIdGenerator)
+ ... component.adapts(interfaces.IPickler)
+ ... def __init__(self, pickler):
+ ... self.pickler = pickler
+ ... def id(self, obj):
+ ... if isinstance(obj, Complex):
+ ... return None
+ ... return globalIds.getId(obj)
+
+ >>> component.provideAdapter(PersistentIdGenerator)
+
+ >>> globalIds.register(a)
+ 1
+ >>> globalIds.register(b)
+ 2
+ >>> globalIds.register(root)
+ 3
+
+ >>> xml = interfaces.IPickler(c).dumps()
+ >>> print xml
+ <?xml version="1.0" encoding="utf-8" ?>
+ <pickle>
+ <object>
+ <klass>
+ <global name="Complex" module="zope.fssync.doctest"/>
+ </klass>
+ <attributes>
+ <attribute name="part1">
+ <persistent> <string>1</string> </persistent>
+ </attribute>
+ <attribute name="part2">
+ <persistent> <string>2</string> </persistent>
+ </attribute>
+ </attributes>
+ </object>
+ </pickle>
+ <BLANKLINE>
+
+The persistent ids can be loaded if we define and register
+a IPersistentIdLoader adapter first:
+
+ >>> class PersistentIdLoader(object):
+ ... interface.implements(interfaces.IPersistentIdLoader)
+ ... component.adapts(interfaces.IUnpickler)
+ ... def __init__(self, unpickler):
+ ... self.unpickler = unpickler
+ ... def load(self, id):
+ ... global globalIds
+ ... return globalIds.resolve(id)
+
+ >>> component.provideAdapter(PersistentIdLoader)
+ >>> c2 = interfaces.IUnpickler(None).loads(xml)
+ >>> c2.part1 == a
+ True
+
+
+Annotations, Extras, and Metadata
+=================================
+
+Complex objects often combine metadata and content data in various ways.
+The fssync package allows to distinguish between file content, extras,
+annotations, and fssync specific metadata:
+
+ - The file content or body is directly stored in a corresponding
+ file.
+ - The extras are object attributes which are part of the object but not
+ part of the file content. They are typically stored in extra files.
+ - Annotations are content related metadata which can be stored as
+ attribute annotations or outside the object itself. They are typically
+ stored in separate pickles for each annotation namespace.
+ - Metadata directly related to fssync are stored in Entries.xml
+ files.
+
+Where exactly these aspects are stored is defined in the
+synchronization format. The default format uses a @@Zope directory with
+subdirectories for object extras and annotations. These @@Zope directories
+also contain an Entries.xml metadata file which defines the following
+attributes:
+
+ - id: the system id of the object, in Zope typically a traversal path
+ - name: the filename of the serialized object
+ - factory: the factory of the object, typically a dotted name of a class
+ - type: a type identifier for pickled objects without factory
+ - provides: directly provided interfaces of the object
+ - key: the original name in the content space which is used
+ in cases where the repository is not able to store this key
+ unambiguously
+ - binary: a flag that prevents merging of binary data
+ - flag: a status flag with the values 'added' or 'removed'
+
+In part the metadata have to be delivered by the synchronizer. The base
+synchronizer, for instance, returns the directly provided interfaces
+of an object as part of its metadata:
+
+ >>> class IMarkerInterface(interface.Interface):
+ ... pass
+ >>> interface.directlyProvides(a, IMarkerInterface)
+ >>> pprint(synchronizer.Synchronizer(a).metadata())
+ {'factory': 'zope.fssync.doctest.A',
+ 'provides': 'zope.fssync.doctest.IMarkerInterface'}
+
+The setmetadata method can be used to write metadata
+back to an object. Which metadata are consumed is up to the
+synchronizer:
+
+ >>> metadata = {'provides': 'zope.fssync.doctest.IMarkerInterface'}
+ >>> synchronizer.Synchronizer(b).setmetadata(metadata)
+ >>> [x for x in interface.directlyProvidedBy(b)]
+ [<InterfaceClass zope.fssync.doctest.IMarkerInterface>]
+
+In order to serialize annotations we must first provide a
+ISynchronizableAnnotations adapter:
+
+ >>> snarf = repository.SnarfRepository(StringIO())
+ >>> checkout = task.Checkout(synchronizer.getSynchronizer, snarf)
+
+ >>> from zope import annotation
+ >>> from zope.annotation.attribute import AttributeAnnotations
+ >>> component.provideAdapter(AttributeAnnotations)
+ >>> class IAnnotatableSample(interface.Interface):
+ ... pass
+ >>> class AnnotatableSample(object):
+ ... interface.implements(IAnnotatableSample,
+ ... annotation.interfaces.IAttributeAnnotatable)
+ ... data = 'Main file content'
+ ... extra = None
+ >>> sample = AnnotatableSample()
+
+ >>> class ITestAnnotations(interface.Interface):
+ ... a = interface.Attribute('A')
+ ... b = interface.Attribute('B')
+ >>> import persistent
+ >>> class TestAnnotations(persistent.Persistent):
+ ... interface.implements(ITestAnnotations,
+ ... annotation.interfaces.IAnnotations)
+ ... component.adapts(IAnnotatableSample)
+ ... def __init__(self):
+ ... self.a = None
+ ... self.b = None
+
+ >>> component.provideAdapter(synchronizer.SynchronizableAnnotations)
+
+
+
+ >>> from zope.annotation.factory import factory
+ >>> component.provideAdapter(factory(TestAnnotations))
+ >>> ITestAnnotations(sample).a = 'annotation a'
+ >>> ITestAnnotations(sample).a
+ 'annotation a'
+ >>> sample.extra = 'extra'
+
+Without a special serializer the annotations are pickled since
+the annotations are stored in the __annotations__ attribute:
+
+ >>> root = dict()
+ >>> root['test'] = sample
+ >>> checkout.perform(root, 'test')
+ >>> print snarf.stream.getvalue()
+ 00000197 @@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="test"
+ keytype="__builtin__.str"
+ type="__builtin__.dict"
+ factory="__builtin__.dict"
+ />
+ </entries>
+ 00000182 test/@@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="test"
+ keytype="__builtin__.str"
+ type="zope.fssync.doctest.AnnotatableSample"
+ />
+ </entries>
+ 00001929 test/test
+ <?xml version="1.0" encoding="utf-8" ?>
+ <pickle>
+ <object>
+ <klass>
+ <global name="AnnotatableSample" module="zope.fssync.doctest"/>
+ </klass>
+ ...
+ </attributes>
+ </object>
+ </pickle>
+ <BLANKLINE>
+
+If we provide a directory serializer for annotations and extras we get a file for
+each extra attribute and annotation namespace.
+
+ >>> component.provideUtility(
+ ... synchronizer.DirectorySynchronizer,
+ ... interfaces.ISynchronizerFactory,
+ ... name=synchronizer.dottedname(synchronizer.Extras))
+
+ >>> component.provideUtility(
+ ... synchronizer.DirectorySynchronizer,
+ ... interfaces.ISynchronizerFactory,
+ ... name=synchronizer.dottedname(synchronizer.SynchronizableAnnotations))
+
+Since the annotations are already handled by the Synchronizer base class
+we only need to specify the extra attribute here:
+
+ >>> class SampleFileSynchronizer(synchronizer.Synchronizer):
+ ... interface.implements(interfaces.IFileSynchronizer)
+ ... def dump(self, writeable):
+ ... writeable.write(self.context.data)
+ ... def extras(self):
+ ... return synchronizer.Extras(extra=self.context.extra)
+ ... def load(self, readable):
+ ... self.context.data = readable.read()
+ >>> component.provideUtility(SampleFileSynchronizer,
+ ... interfaces.ISynchronizerFactory,
+ ... name=synchronizer.dottedname(AnnotatableSample))
+
+ >>> interface.directlyProvides(sample, IMarkerInterface)
+ >>> root['test'] = sample
+ >>> checkout.perform(root, 'test')
+ >>> print snarf.stream.getvalue()
+ 00000197 @@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="test"
+ keytype="__builtin__.str"
+ type="__builtin__.dict"
+ factory="__builtin__.dict"
+ />
+ </entries>
+ 00000182 test/@@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="test"
+ keytype="__builtin__.str"
+ type="zope.fssync.doctest.AnnotatableSample"
+ />
+ </entries>
+ 00001929 test/test
+ <?xml version="1.0" encoding="utf-8" ?>
+ <pickle>
+ <object>
+ <klass>
+ <global name="AnnotatableSample" module="zope.fssync.doctest"/>
+ </klass>
+ <attributes>
+ <attribute name="__annotations__">
+ <initialized_object>
+ <klass>
+ <global id="o0" name="__newobj__" module="copy_reg"/>
+ </klass>
+ <arguments>
+ <tuple>
+ <global name="OOBTree" module="BTrees.OOBTree"/>
+ </tuple>
+ </arguments>
+ <state>
+ <tuple>
+ <tuple>
+ <tuple>
+ <tuple>
+ <string>zope.fssync.doctest.TestAnnotations</string>
+ <initialized_object>
+ <klass> <reference id="o0"/> </klass>
+ <arguments>
+ <tuple>
+ <global name="TestAnnotations" module="zope.fssync.doctest"/>
+ </tuple>
+ </arguments>
+ <state>
+ <dictionary>
+ <item>
+ <key> <string>a</string> </key>
+ <value> <string>annotation a</string> </value>
+ </item>
+ <item>
+ <key> <string>b</string> </key>
+ <value> <none/> </value>
+ </item>
+ </dictionary>
+ </state>
+ </initialized_object>
+ </tuple>
+ </tuple>
+ </tuple>
+ </tuple>
+ </state>
+ </initialized_object>
+ </attribute>
+ <attribute name="extra">
+ <string>extra</string>
+ </attribute>
+ </attributes>
+ </object>
+ </pickle>
+ 00000197 @@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="test"
+ keytype="__builtin__.str"
+ type="__builtin__.dict"
+ factory="__builtin__.dict"
+ />
+ </entries>
+ 00000296 test/@@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="test"
+ keytype="__builtin__.str"
+ type="zope.fssync.doctest.AnnotatableSample"
+ factory="zope.fssync.doctest.AnnotatableSample"
+ provides="zope.fssync.doctest.IMarkerInterface"
+ />
+ </entries>
+ 00000211 test/@@Zope/Annotations/test/@@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="zope.fssync.doctest.TestAnnotations"
+ keytype="__builtin__.str"
+ type="zope.fssync.doctest.TestAnnotations"
+ />
+ </entries>
+ 00000617 test/@@Zope/Annotations/test/zope.fssync.doctest.TestAnnotations
+ <?xml version="1.0" encoding="utf-8" ?>
+ <pickle>
+ ...
+ </pickle>
+ 00000161 test/@@Zope/Extra/test/@@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="extra"
+ keytype="__builtin__.str"
+ type="__builtin__.str"
+ />
+ </entries>
+ 00000082 test/@@Zope/Extra/test/extra
+ <?xml version="1.0" encoding="utf-8" ?>
+ <pickle> <string>extra</string> </pickle>
+ 00000017 test/test
+ Main file content
+
+The annotations and extras can of course also be deserialized. The default
+deserializer handles both cases:
+
+ >>> target = {}
+ >>> commit = task.Commit(synchronizer.getSynchronizer, snarf)
+ >>> commit.perform(target, 'root', 'test')
+ >>> result = target['root']['test']
+ >>> result.extra
+ 'extra'
+ >>> ITestAnnotations(result).a
+ 'annotation a'
+
+Since we use an IDirectorySynchronizer each extra attribute and
+annotation namespace gets its own file:
+
+ >>> for path in sorted(snarf.iterPaths()):
+ ... print path
+ @@Zope/Entries.xml
+ test/@@Zope/Annotations/test/@@Zope/Entries.xml
+ test/@@Zope/Annotations/test/zope.fssync.doctest.TestAnnotations
+ test/@@Zope/Entries.xml
+ test/@@Zope/Extra/test/@@Zope/Entries.xml
+ test/@@Zope/Extra/test/extra
+ test/test
+
+The number of files can be reduced if we provide the default synchronizer
+which uses a single file for all annotations and a single file for all extras:
+
+ >>> component.provideUtility(
+ ... synchronizer.DefaultSynchronizer,
+ ... interfaces.ISynchronizerFactory,
+ ... name=synchronizer.dottedname(synchronizer.Extras))
+
+ >>> component.provideUtility(
+ ... synchronizer.DefaultSynchronizer,
+ ... interfaces.ISynchronizerFactory,
+ ... name=synchronizer.dottedname(synchronizer.SynchronizableAnnotations))
+
+ >>> root['test'] = sample
+ >>> snarf = repository.SnarfRepository(StringIO())
+ >>> checkout.repository = snarf
+ >>> checkout.perform(root, 'test')
+ >>> for path in sorted(snarf.iterPaths()):
+ ... print path
+ @@Zope/Entries.xml
+ test/@@Zope/Annotations/test
+ test/@@Zope/Entries.xml
+ test/@@Zope/Extra/test
+ test/test
+
+The annotations and extras can of course also be deserialized. The default
+deserializer handles both
+
+ >>> target = {}
+ >>> commit = task.Commit(synchronizer.getSynchronizer, snarf)
+ >>> commit.perform(target, 'root', 'test')
+ >>> result = target['root']['test']
+ >>> result.extra
+ 'extra'
+ >>> ITestAnnotations(result).a
+ 'annotation a'
+ >>> [x for x in interface.directlyProvidedBy(result)]
+ [<InterfaceClass zope.fssync.doctest.IMarkerInterface>]
+
+
Added: zope.fssync/trunk/src/zope/fssync/caseinsensitivity.txt
===================================================================
--- zope.fssync/trunk/src/zope/fssync/caseinsensitivity.txt (rev 0)
+++ zope.fssync/trunk/src/zope/fssync/caseinsensitivity.txt 2007-06-13 15:24:10 UTC (rev 76667)
@@ -0,0 +1,252 @@
+============================
+Case-insensitive Filesystems
+============================
+
+Typical Zope objects have unicode names which are not case sensitive.
+If these names are used on case-insensitive filesystems it may happen
+that objects are overwritten silently. To avoid this risk one has to
+ensure that normalized filenames are used and ambiguities are resolved
+before the data are written to a filesystem.
+
+Let's look at a basic ambiguity:
+
+ >>> from StringIO import StringIO
+ >>> from zope import interface
+ >>> from zope import component
+ >>> from zope import location
+ >>> from zope import traversing
+ >>> from zope.fssync import interfaces
+ >>> from zope.fssync import task
+ >>> from zope.fssync import synchronizer
+ >>> from zope.fssync import repository
+ >>> from zope.fssync import pickle
+
+ >>> class Locatable(object):
+ ... interface.implements(location.interfaces.ILocation)
+ ... __name__ = __parent__ = None
+ >>> class File(Locatable):
+ ... data = ''
+ >>> class RootDirectory(Locatable, dict):
+ ... interface.implements(traversing.interfaces.IContainmentRoot)
+ >>> a = File()
+ >>> A = File()
+ >>> a.data = 'data of a'
+ >>> A.data = 'data of A'
+ >>> root = RootDirectory({u'a.txt': a, u'A.txt': A})
+ >>> root.__name__ = None
+ >>> root.__parent__ = None
+
+We must set up the names and parents and the necessary adapters to mimic
+Zope's behavior:
+
+ >>> from zope.location.traversing import LocationPhysicallyLocatable
+ >>> from zope.traversing.interfaces import IPhysicallyLocatable
+ >>> component.provideAdapter(LocationPhysicallyLocatable, None, IPhysicallyLocatable)
+
+ >>> a.__parent__ = root; a.__name__ = u'a.txt'
+ >>> A.__parent__ = root; A.__name__ = u'A.txt'
+ >>> traversing.api.getPath(a)
+ u'/a.txt'
+ >>> traversing.api.getPath(A)
+ u'/A.txt'
+
+The default entry ids are also paths:
+
+ >>> component.provideAdapter(task.EntryId)
+
+We must register the serializers:
+
+ >>> class FileSynchronizer(synchronizer.Synchronizer):
+ ... interface.implements(interfaces.IFileSynchronizer,
+ ... interfaces.IFileDeserializer)
+ ... def dump(self, writeable):
+ ... writeable.write(self.context.data)
+ ... def load(self, readable):
+ ... self.context.data = readable.read()
+
+ >>> component.provideUtility(FileSynchronizer,
+ ... interfaces.ISynchronizerFactory,
+ ... name=synchronizer.dottedname(File))
+ >>> component.provideUtility(synchronizer.DirectorySynchronizer,
+ ... interfaces.ISynchronizerFactory,
+ ... name=synchronizer.dottedname(RootDirectory))
+
+ >>> component.provideAdapter(pickle.PathPersistentIdGenerator)
+
+A SnarfRepository is case sensitive by default and preserves the original
+names and paths:
+
+ >>> snarf = repository.SnarfRepository(StringIO())
+ >>> checkout = task.Checkout(synchronizer.getSynchronizer, snarf)
+ >>> checkout.perform(root, 'test')
+ >>> print snarf.stream.getvalue()
+ 00000247 @@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="test"
+ keytype="__builtin__.str"
+ type="zope.fssync.doctest.RootDirectory"
+ factory="zope.fssync.doctest.RootDirectory"
+ id="/"
+ />
+ </entries>
+ 00000418 test/@@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="A.txt"
+ keytype="__builtin__.unicode"
+ type="zope.fssync.doctest.File"
+ factory="zope.fssync.doctest.File"
+ id="/A.txt"
+ />
+ <entry name="a.txt"
+ keytype="__builtin__.unicode"
+ type="zope.fssync.doctest.File"
+ factory="zope.fssync.doctest.File"
+ id="/a.txt"
+ />
+ </entries>
+ 00000009 test/A.txt
+ data of A00000009 test/a.txt
+ data of a
+
+If we use a case insensitive SnarfRepository the filenames are disambiguated.
+Note that the reference paths in the entries metadata sections are still
+the same:
+
+ >>> snarf = repository.SnarfRepository(StringIO(), case_insensitive=True)
+ >>> checkout = task.Checkout(synchronizer.getSynchronizer, snarf)
+ >>> checkout.perform(root, 'test')
+ >>> print snarf.stream.getvalue()
+ 00000247 @@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="test"
+ keytype="__builtin__.str"
+ type="zope.fssync.doctest.RootDirectory"
+ factory="zope.fssync.doctest.RootDirectory"
+ id="/"
+ />
+ </entries>
+ 00000441 test/@@Zope/Entries.xml
+ <?xml version='1.0' encoding='utf-8'?>
+ <entries>
+ <entry name="A-1.txt"
+ keytype="__builtin__.unicode"
+ type="zope.fssync.doctest.File"
+ factory="zope.fssync.doctest.File"
+ key="A.txt"
+ id="/A.txt"
+ />
+ <entry name="a.txt"
+ keytype="__builtin__.unicode"
+ type="zope.fssync.doctest.File"
+ factory="zope.fssync.doctest.File"
+ id="/a.txt"
+ />
+ </entries>
+ 00000009 test/A-1.txt
+ data of A00000009 test/a.txt
+ data of a
+
+After the registration of the necessary deserializers we can reimport the serialized
+data from the repository:
+
+ >>> target = {}
+ >>> commit = task.Commit(synchronizer.getSynchronizer, snarf)
+ >>> commit.perform(target, 'root', 'test')
+ >>> sorted(target.keys())
+ ['root']
+ >>> sorted(target['root'].keys())
+ [u'A.txt', u'a.txt']
+
+ >>> target['root']['a.txt'].data
+ 'data of a'
+
+ >>> target['root']['A.txt'].data
+ 'data of A'
+
+
+Platform Issues
+===============
+
+Mac OS X can work with several filesystems. Some of them case-sensitive,
+some of them case-insensitive. The popular HFS+ can be configured to
+behave in a case-preserving, case-insensitive or case-sensitive manner.
+
+TODO: Unfortunately os.normcase cannot be used on OS X. This has still to be
+fixed.
+
+Another Darwin-specific problem is the special utf-8 encoding which is used by OS X.
+Linux and (most?) other Unix-like operating systems use the normalization
+form C (NFC) for UTF-8 encoding by default but do not enforce this.
+Darwin, the base of Macintosh OSX, enforces normalization form D (NFD),
+where a few characters (especially umlauts) are encoded in a different way.
+The NFD encoding basically says that an umlaut is encoded as a front vowel
+followed by a combining diaeresis. 'ä', for instance, is represented
+as '\xc3\xa4' in NFC and as 'a\xcc\x88' in NFD:
+
+ >>> nfd = u'a\u0308'
+ >>> nfd.encode('utf-8')
+ 'a\xcc\x88'
+
+ >>> nfc = u'\xe4'
+ >>> nfc.encode('utf-8')
+ '\xc3\xa4'
+
+Both can live together in Python dicts or Zope containers:
+
+ >>> root = RootDirectory({nfd: a, nfc: A})
+ >>> a.__parent__ = root; a.__name__ = nfd
+ >>> A.__parent__ = root; A.__name__ = nfc
+ >>> sorted(root.keys())
+ [u'a\u0308', u'\xe4']
+
+Let's see how these are stored in a NFD enforced SNARF archive:
+
+ >>> snarf = repository.SnarfRepository(StringIO())
+ >>> snarf.enforce_nfd = True
+ >>> snarf.case_insensitive = True
+ >>> checkout = task.Checkout(synchronizer.getSynchronizer, snarf)
+ >>> checkout.perform(root, 'test')
+
+ >>> sorted(snarf.files.keys())
+ ['@@Zope/Entries.xml', 'test/@@Zope/Entries.xml', u'test/a\u0308', u'test/a\u0308-1']
+
+ >>> metadata = snarf.getMetadata()
+
+The first entry keeps its original name:
+
+ >>> pprint(metadata.getentry(u'test/a\u0308'))
+ {u'factory': u'zope.fssync.doctest.File',
+ u'id': u'/a\u0308',
+ u'keytype': u'__builtin__.unicode',
+ u'type': u'zope.fssync.doctest.File'}
+
+The second entry is disambiguated from the first:
+
+ >>> pprint(metadata.getentry(u'test/a\u0308-1'))
+ {u'factory': u'zope.fssync.doctest.File',
+ u'id': u'/\xe4',
+ u'key': u'\xe4',
+ u'keytype': u'__builtin__.unicode',
+ u'type': u'zope.fssync.doctest.File'}
+
+
+Now we write the data back:
+
+ >>> target = {}
+ >>> commit = task.Commit(synchronizer.getSynchronizer, snarf)
+ >>> commit.debug = True
+ >>> commit.perform(target, 'root', 'test')
+
+
+ >>> sorted(target.keys())
+ ['root']
+
+ >>> target['root'][u'a\u0308'].data
+ 'data of a'
+
+ >>> target['root'][u'\xe4'].data
+ 'data of A'
+
Deleted: zope.fssync/trunk/src/zope/fssync/command.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/command.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/command.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -1,149 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Table-based program command dispatcher.
-
-This dispatcher supports a 'named command' dispatch similar to that
-found in the standard CVS and Subversion client applications.
-
-$Id$
-"""
-import getopt
-import os.path
-import sys
-
-
-from zope.fssync.fsutil import Error
-
-
-class Usage(Error):
- """Subclass for usage error (command-line syntax).
-
- You should return an exit status of 2 rather than 1 when catching this.
- """
-
-
-class Command(object):
-
- def __init__(self, name=None, usage=None):
- if name is None:
- name = os.path.basename(sys.argv[0])
- self.program = name
- if usage is None:
- import __main__
- usage = __main__.__doc__
- self.helptext = usage
- self.command_table = {}
- self.global_options = []
- self.local_options = []
- self.command = None
-
- def addCommand(self, name, function, short="", long="", aliases=""):
- names = [name] + aliases.split()
- cmdinfo = short, long.split(), function
- for n in names:
- assert n not in self.command_table
- self.command_table[n] = cmdinfo
-
- def main(self, args=None):
- try:
- self.realize()
- self.run()
-
- except Usage, msg:
- self.usage(sys.stderr, msg)
- self.usage(sys.stderr, 'for help use "%(program)s help"')
- return 2
-
- except Error, msg:
- self.usage(sys.stderr, msg)
- return 1
-
- except SystemExit:
- raise
-
- else:
- return None
-
- def realize(self, args=None):
- if "help" not in self.command_table:
- self.addCommand("help", self.help)
- short, long, func = self.command_table["help"]
- for alias in ("h", "?"):
- if alias not in self.command_table:
- self.addCommand(alias, func, short, " ".join(long))
- if args is None:
- args = sys.argv[1:]
- self.global_options, args = self.getopt("global",
- args, "h", ["help"],
- self.helptext)
- if not args:
- raise Usage("missing command argument")
- self.command = args.pop(0)
- if self.command not in self.command_table:
- raise Usage("unrecognized command")
- cmdinfo = self.command_table[self.command]
- short, long, func = cmdinfo
- short = "h" + short
- long = ["help"] + list(long)
- self.local_options, self.args = self.getopt(self.command,
- args, short, long,
- func.__doc__)
-
- def getopt(self, cmd, args, short, long, helptext):
- try:
- opts, args = getopt.getopt(args, short, long)
- except getopt.error, e:
- raise Usage("%s option error: %s", cmd, e)
- for opt, arg in opts:
- if opt in ("-h", "--help"):
- self.usage(sys.stdout, helptext)
- sys.exit()
- return opts, args
-
- def run(self):
- _, _, func = self.command_table[self.command]
- func(self.local_options, self.args)
-
- def usage(self, file, text):
- text = str(text)
- try:
- text = text % {"program": self.program}
- except:
- pass
- print >>file, text
-
- def help(self, opts, args):
- """%(program)s help [COMMAND ...]
-
- Display help text. If COMMAND is specified, help text about
- each named command is displayed, otherwise general help about
- using %(program)s is shown.
- """
- if not args:
- self.usage(sys.stdout, self.helptext)
- else:
- for cmd in args:
- if cmd not in self.command_table:
- print >>sys.stderr, "unknown command:", cmd
- first = True
- for cmd in args:
- cmdinfo = self.command_table.get(cmd)
- if cmdinfo is None:
- continue
- _, _, func = cmdinfo
- if first:
- first = False
- else:
- print
- self.usage(sys.stdout, func.__doc__)
Modified: zope.fssync/trunk/src/zope/fssync/copier.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/copier.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/copier.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -56,7 +56,7 @@
def listDirectory(self, dir):
return [fn
- for fn in fsutil.listdir(dir)
+ for fn in os.listdir(dir)
if fn != "@@Zope"
if not self.sync.fsmerger.ignore(fn)]
Modified: zope.fssync/trunk/src/zope/fssync/fsmerger.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/fsmerger.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/fsmerger.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -51,11 +51,25 @@
(isdir(remote) or not exists(remote))):
self.merge_dirs(local, remote)
else:
- # One is a file, the other is a directory
- # TODO: We should be able to deal with this case, too
- self.reporter("TODO: %s" % local)
- # TODO: probably for the best; we *don't* know the right
- # thing to do anyway
+ # One is a file, the other is a directory.
+ if self.local_modifications(local):
+ # We have local modifications, so we cannot replace
+ # the local object without loss.
+ # SVN reports a failure here and so do we
+ self.reporter("C %s" % local)
+ return
+ # Since this is more
+ # like a conflict that should be resolved one could
+ # also make a backup and report a warning.
+
+ # self.backup(local)
+ # self.reporter("B %s" % local)
+ else:
+ # If we have no local modifications we
+                # simply replace the local object with its
+ # remote counterpart.
+ self.remove(local)
+ self.merge(local, remote)
return
flag = self.metadata.getentry(local).get("flag")
self.merge_extra(local, remote, flag)
@@ -65,6 +79,51 @@
self.remove_special(local, "Annotations")
self.remove_special(local, "Original")
+ def local_modifications(self, local):
+ """Helper to check for local modifications."""
+ lentry = self.metadata.getentry(local)
+ flag = lentry.get("flag")
+ if flag == 'added':
+ return True
+ if isdir(local):
+ locals = [join(local, name)
+ for name in os.listdir(local)]
+ for path in locals:
+ if self.local_modifications(path):
+ return True
+ return False
+ else:
+ original = fsutil.getoriginal(local)
+ if not exists(original):
+ return True
+ return not self.merger.cmpfile(local, original)
+
+ def backup(self, local):
+ """Helper to preserve unmergeable local files."""
+ appendix = '.OLD'
+ target = local + appendix
+ count = 0
+ while exists(target):
+ count += 1
+ target = "%s%s%s" % (local, appendix, count)
+ shutil.move(local, target)
+
+ def remove(self, local):
+ """Helper to remove a local file or directory."""
+ if isdir(local):
+ try:
+ shutil.rmtree(local)
+ self.reportdir("D", local)
+ except os.error:
+ self.reportdir("?", local)
+ else:
+ try:
+ os.remove(local)
+ self.remove_special(local, "Original")
+ self.reporter("D %s" % local)
+ except:
+ self.reporter("? %s" % local)
+
def merge_extra(self, local, remote, flag):
"""Helper to merge the Extra trees."""
lextra = fsutil.getextra(local)
@@ -181,7 +240,7 @@
return
lnames = dict([(normcase(name), name)
- for name in fsutil.listdir(localdir)])
+ for name in os.listdir(localdir)])
else:
if flag == "removed":
self.reportdir("R", localdir)
@@ -198,7 +257,7 @@
if exists(remotedir):
rnames = dict([(normcase(name), name)
- for name in fsutil.listdir(remotedir)])
+ for name in os.listdir(remotedir)])
else:
rnames = {}
Deleted: zope.fssync/trunk/src/zope/fssync/fssync.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/fssync.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/fssync.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -1,760 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Highest-level classes to support filesystem synchronization:
-
-class Network -- handle network connection
-class FSSync -- implement various commands (checkout, commit etc.)
-
-$Id$
-"""
-
-import os
-import sys
-import shutil
-import urllib
-import filecmp
-import htmllib
-import httplib
-import tempfile
-import urlparse
-import formatter
-
-from StringIO import StringIO
-
-from os.path import exists, isfile, isdir
-from os.path import dirname, basename, split, join
-from os.path import realpath, normcase, normpath
-
-from zope.fssync.metadata import Metadata, dump_entries
-from zope.fssync.fsmerger import FSMerger
-from zope.fssync.fsutil import Error
-from zope.fssync import fsutil
-from zope.fssync.passwd import PasswordManager
-from zope.fssync.snarf import Snarfer, Unsnarfer
-
-
-if sys.platform[:3].lower() == "win":
- DEV_NULL = r".\nul"
-else:
- DEV_NULL = "/dev/null"
-
-
-class Network(PasswordManager):
-
- """Handle network communication.
-
- This class has various methods for managing the root url (which is
- stored in a file @@Zope/Root) and has a method to send an HTTP(S)
- request to the root URL, expecting a snarf file back (that's all the
- application needs).
-
- Public instance variables:
-
- rooturl -- full root url, e.g. 'http://user:passwd@host:port/path'
- roottype -- 'http' or 'https'
- user_passwd -- 'user:passwd'
- host_port -- 'host:port'
- rootpath -- '/path'
- """
-
- def __init__(self, rooturl=None):
- """Constructor. Optionally pass the root url."""
- super(Network, self).__init__()
- self.setrooturl(rooturl)
-
- def loadrooturl(self, target):
- """Load the root url for the given target.
-
- This calls findrooturl() to find the root url for the target,
- and then calls setrooturl() to set it. If self.findrooturl()
- can't find a root url, Error() is raised.
- """
- rooturl = self.findrooturl(target)
- if not rooturl:
- raise Error("can't find root url for target", target)
- self.setrooturl(rooturl)
-
- def saverooturl(self, target):
- """Save the root url in the target's @@Zope directory.
-
- This writes the file <target>/@@Zope/Root; the directory
- <target>/@@Zope must already exist.
- """
- if self.rooturl:
- dir = join(target, "@@Zope")
- if not exists(dir):
- os.mkdir(dir)
- fn = join(dir, "Root")
- self.writefile(self.rooturl + "\n",
- fn)
-
- def findrooturl(self, target):
- """Find the root url for the given target.
-
- This looks in <target>/@@Zope/Root, and then in the
- corresponding place for target's parent, and then further
- ancestors, until the filesystem root is reached.
-
- If no root url is found, return None.
- """
- dir = realpath(target)
- while dir:
- rootfile = join(dir, "@@Zope", "Root")
- try:
- data = self.readfile(rootfile)
- except IOError:
- pass
- else:
- data = data.strip()
- if data:
- return data
- head, tail = split(dir)
- if tail in fsutil.unwanted:
- break
- dir = head
- return None
-
- def setrooturl(self, rooturl):
- """Set the root url.
-
- If the argument is None or empty, self.rooturl and all derived
- instance variables are set to None. Otherwise, self.rooturl
- is set to the argument the broken-down root url is stored in
- the other instance variables.
- """
- if not rooturl:
- rooturl = roottype = rootpath = user_passwd = host_port = None
- else:
- roottype, rest = urllib.splittype(rooturl)
- if roottype not in ("http", "https"):
- raise Error("root url must be 'http' or 'https'", rooturl)
- if roottype == "https" and not hasattr(httplib, "HTTPS"):
- raise Error("https not supported by this Python build")
- netloc, rootpath = urllib.splithost(rest)
- if not rootpath:
- rootpath = "/"
- user_passwd, host_port = urllib.splituser(netloc)
-
- self.rooturl = rooturl
- self.roottype = roottype
- self.rootpath = rootpath
- self.user_passwd = user_passwd
- self.host_port = host_port
-
- def readfile(self, file, mode="r"):
- # Internal helper to read a file
- f = open(file, mode)
- try:
- return f.read()
- finally:
- f.close()
-
- def writefile(self, data, file, mode="w"):
- # Internal helper to write a file
- f = open(file, mode)
- try:
- f.write(data)
- finally:
- f.close()
-
- def httpreq(self, path, view, datasource=None,
- content_type="application/x-snarf",
- expected_type="application/x-snarf"):
- """Issue an HTTP or HTTPS request.
-
- The request parameters are taken from the root url, except
- that the requested path is constructed by concatenating the
- path and view arguments.
-
- If the optional 'datasource' argument is not None, it should
- be a callable with a stream argument which, when called,
- writes data to the stream. In this case, a POST request is
- issued, and the content-type header is set to the
- 'content_type' argument, defaulting to 'application/x-snarf'.
- Otherwise (if datasource is None), a GET request is issued and
- no input document is sent.
-
- If the request succeeds and returns a document whose
- content-type is 'application/x-snarf', the return value is a tuple
- (fp, headers) where fp is a non-seekable stream from which the
- return document can be read, and headers is a case-insensitive
- mapping giving the response headers.
-
- If the request returns an HTTP error, the Error exception is
- raised. If it returns success (error code 200) but the
- content-type of the result is not 'application/x-snarf', the Error
- exception is also raised. In these error cases, if the result
- document's content-type is a text type (anything starting with
- 'text/'), the text of the result document is included in the
- Error exception object; in the specific case that the type is
- text/html, HTML formatting is removed using a primitive
- formatter.
-
- TODO: This doesn't support proxies or redirect responses.
- """
- # TODO: Don't change the case of the header names; httplib might
- # not treat them in a properly case-insensitive manner.
- assert self.rooturl
- if not path.endswith("/"):
- path += "/"
- path = urllib.quote(path)
- path += view
- if self.roottype == "https":
- conn = httplib.HTTPSConnection(self.host_port)
- else:
- conn = httplib.HTTPConnection(self.host_port)
-
- if datasource is None:
- conn.putrequest("GET", path)
- else:
- conn.putrequest("POST", path)
- conn.putheader("Content-type", content_type)
- #conn.putheader("Transfer-encoding", "chunked")
- #XXX Chunking works only with the zserver. Twisted responds with
- # HTTP error 400 (Bad Request); error document:
- # Excess 4 bytes sent in chunk transfer mode
- #We use a buffer as workaround and compute the Content-Length in
- #advance
- tmp = tempfile.TemporaryFile('w+b')
- datasource(tmp)
- conn.putheader("Content-Length", str(tmp.tell()))
-
- if self.user_passwd:
- if ":" not in self.user_passwd:
- auth = self.getToken(self.roottype,
- self.host_port,
- self.user_passwd)
- else:
- auth = self.createToken(self.user_passwd)
- conn.putheader('Authorization', 'Basic %s' % auth)
- conn.putheader("Host", self.host_port)
- conn.putheader("Connection", "close")
- conn.endheaders()
- if datasource is not None:
- #XXX If chunking works again, replace the following lines with
- # datasource(PretendStream(conn))
- # conn.send("0\r\n\r\n")
- tmp.seek(0)
- data = tmp.read(1<<16)
- while data:
- conn.send(data)
- data = tmp.read(1<<16)
- tmp.close()
-
- response = conn.getresponse()
- if response.status != 200:
- raise Error("HTTP error %s (%s); error document:\n%s",
- response.status, response.reason,
- self.slurptext(response.fp, response.msg))
- elif expected_type and response.msg["Content-type"] != expected_type:
- raise Error(self.slurptext(response.fp, response.msg))
- else:
- return response.fp, response.msg
-
- def slurptext(self, fp, headers):
- """Helper to read the result document.
-
- This removes the formatting from a text/html document; returns
- other text documents as-is; and for non-text documents,
- returns just a string giving the content-type.
- """
- # Too often, we just get HTTP response code 200 (OK), with an
- # HTML document that explains what went wrong.
- data = fp.read()
- ctype = headers.get("Content-type", 'unknown')
- if ctype == "text/html":
- s = StringIO()
- f = formatter.AbstractFormatter(formatter.DumbWriter(s))
- p = htmllib.HTMLParser(f)
- p.feed(data)
- p.close()
- return s.getvalue().strip()
- if ctype.startswith("text/"):
- return data.strip()
- return "Content-type: %s" % ctype
-
-class PretendStream(object):
-
- """Helper class to turn writes into chunked sends."""
-
- def __init__(self, conn):
- self.conn = conn
-
- def write(self, s):
- self.conn.send("%x\r\n" % len(s))
- self.conn.send(s)
-
-class DataSource(object):
-
- """Helper class to provide a data source for httpreq."""
-
- def __init__(self, head, tail):
- self.head = head
- self.tail = tail
-
- def __call__(self, f):
- snf = Snarfer(f)
- snf.add(join(self.head, self.tail), self.tail)
- snf.addtree(join(self.head, "@@Zope"), "@@Zope/")
-
-class FSSync(object):
-
- def __init__(self, metadata=None, network=None, rooturl=None):
- if metadata is None:
- metadata = Metadata()
- if network is None:
- network = Network()
- self.metadata = metadata
- self.network = network
- self.network.setrooturl(rooturl)
- self.fsmerger = FSMerger(self.metadata, self.reporter)
-
- def login(self, url=None, user=None):
- scheme, host_port, user = self.get_login_info(url, user)
- token = self.network.getToken(scheme, host_port, user)
- self.network.addToken(scheme, host_port, user, token)
-
- def logout(self, url=None, user=None):
- scheme, host_port, user = self.get_login_info(url, user)
- if scheme:
- ok = self.network.removeToken(scheme, host_port, user)
- else:
- # remove both, if present
- ok1 = self.network.removeToken("http", host_port, user)
- ok2 = self.network.removeToken("https", host_port, user)
- ok = ok1 or ok2
- if not ok:
- raise Error("matching login info not found")
-
- def get_login_info(self, url, user):
- if url:
- parts = urlparse.urlsplit(url)
- scheme = parts[0]
- host_port = parts[1]
- if not (scheme and host_port):
- raise Error(
- "URLs must include both protocol (http or https)"
- " and host information")
- if "@" in host_port:
- user_passwd, host_port = host_port.split("@", 1)
- if not user:
- if ":" in user_passwd:
- user = user_passwd.split(":", 1)[0]
- else:
- user = user_passwd
- else:
- self.network.loadrooturl(os.curdir)
- scheme = self.network.roottype
- host_port = self.network.host_port
- if not user:
- upw = self.network.user_passwd
- if ":" in upw:
- user = upw.split(":", 1)[0]
- else:
- user = upw
- if not user:
- user = raw_input("Username: ").strip()
- if not user:
- raise Error("username cannot be empty")
- return scheme, host_port, user
-
- def checkout(self, target):
- rootpath = self.network.rootpath
- if not rootpath:
- raise Error("root url not set")
- if self.metadata.getentry(target):
- raise Error("target already registered", target)
- if exists(target) and not isdir(target):
- raise Error("target should be a directory", target)
- fsutil.ensuredir(target)
- i = rootpath.rfind("/")
- tail = rootpath[i+1:]
- tail = tail or "root"
- fp, headers = self.network.httpreq(rootpath, "@@toFS.snarf")
- try:
- self.merge_snarffile(fp, target, tail)
- finally:
- fp.close()
- self.network.saverooturl(target)
-
- def multiple(self, args, method, *more):
- if not args:
- args = [os.curdir]
- for target in args:
- if self.metadata.getentry(target):
- method(target, *more)
- else:
- names = self.metadata.getnames(target)
- if not names:
- # just raise Error directly?
- method(target, *more) # Will raise an exception
- else:
- for name in names:
- method(join(target, name), *more)
-
- def commit(self, target, note="fssync_commit", raise_on_conflicts=False):
- entry = self.metadata.getentry(target)
- if not entry:
- raise Error("nothing known about", target)
- self.network.loadrooturl(target)
- path = entry["path"]
- view = "@@fromFS.snarf?note=%s" % urllib.quote(note)
- if raise_on_conflicts:
- view += "&raise=1"
- head, tail = split(realpath(target))
- data = DataSource(head, tail)
- fp, headers = self.network.httpreq(path, view, data)
- try:
- self.merge_snarffile(fp, head, tail)
- finally:
- fp.close()
-
- def checkin(self, target, note="fssync_checkin"):
- rootpath = self.network.rootpath
- if not rootpath:
- raise Error("root url not set")
- if rootpath == "/":
- raise Error("root url should name an inferior object")
- i = rootpath.rfind("/")
- path, name = rootpath[:i], rootpath[i+1:]
- if not path:
- path = "/"
- if not name:
- raise Error("root url should not end in '/'")
- entry = self.metadata.getentry(target)
- if not entry:
- raise Error("nothing known about", target)
- qnote = urllib.quote(note)
- qname = urllib.quote(name)
- head, tail = split(realpath(target))
- qsrc = urllib.quote(tail)
- view = "@@checkin.snarf?note=%s&name=%s&src=%s" % (qnote, qname, qsrc)
- data = DataSource(head, tail)
- fp, headers = self.network.httpreq(path, view, data,
- expected_type=None)
- message = self.network.slurptext(fp, headers)
- if message:
- print message
-
- def update(self, target):
- entry = self.metadata.getentry(target)
- if not entry:
- raise Error("nothing known about", target)
- self.network.loadrooturl(target)
- head, tail = fsutil.split(target)
- path = entry["path"]
- fp, headers = self.network.httpreq(path, "@@toFS.snarf")
- try:
- self.merge_snarffile(fp, head, tail)
- finally:
- fp.close()
-
- def merge_snarffile(self, fp, localdir, tail):
- uns = Unsnarfer(fp)
- tmpdir = tempfile.mktemp()
- try:
- os.mkdir(tmpdir)
- uns.unsnarf(tmpdir)
- self.fsmerger.merge(join(localdir, tail), join(tmpdir, tail))
- self.metadata.flush()
- print "All done."
- finally:
- if isdir(tmpdir):
- shutil.rmtree(tmpdir)
-
- def resolve(self, target):
- entry = self.metadata.getentry(target)
- if "conflict" in entry:
- del entry["conflict"]
- self.metadata.flush()
- elif isdir(target):
- self.dirresolve(target)
-
- def dirresolve(self, target):
- assert isdir(target)
- names = self.metadata.getnames(target)
- for name in names:
- t = join(target, name)
- e = self.metadata.getentry(t)
- if e:
- self.resolve(t)
-
- def revert(self, target):
- entry = self.metadata.getentry(target)
- if not entry:
- raise Error("nothing known about", target)
- flag = entry.get("flag")
- orig = fsutil.getoriginal(target)
- if flag == "added":
- entry.clear()
- elif flag == "removed":
- if exists(orig):
- shutil.copyfile(orig, target)
- del entry["flag"]
- elif "conflict" in entry:
- if exists(orig):
- shutil.copyfile(orig, target)
- del entry["conflict"]
- elif isfile(orig):
- if filecmp.cmp(target, orig, shallow=False):
- return
- shutil.copyfile(orig, target)
- elif isdir(target):
- # TODO: how to recurse?
- self.dirrevert(target)
- self.metadata.flush()
- if os.path.isdir(target):
- target = join(target, "")
- self.reporter("Reverted " + target)
-
- def dirrevert(self, target):
- assert isdir(target)
- names = self.metadata.getnames(target)
- for name in names:
- t = join(target, name)
- e = self.metadata.getentry(t)
- if e:
- self.revert(t)
-
- def reporter(self, msg):
- if msg[0] not in "/*":
- print msg
-
- def diff(self, target, mode=1, diffopts="", need_original=True):
- assert mode == 1, "modes 2 and 3 are not yet supported"
- entry = self.metadata.getentry(target)
- if not entry:
- raise Error("diff target '%s' doesn't exist", target)
- if "flag" in entry and need_original:
- raise Error("diff target '%s' is added or deleted", target)
- if isdir(target):
- self.dirdiff(target, mode, diffopts, need_original)
- return
- orig = fsutil.getoriginal(target)
- if not isfile(target):
- if entry.get("flag") == "removed":
- target = DEV_NULL
- else:
- raise Error("diff target '%s' is file nor directory", target)
- have_original = True
- if not isfile(orig):
- if entry.get("flag") != "added":
- raise Error("can't find original for diff target '%s'", target)
- have_original = False
- orig = DEV_NULL
- if have_original and filecmp.cmp(target, orig, shallow=False):
- return
- print "Index:", target
- sys.stdout.flush()
- cmd = ("diff %s %s %s" % (diffopts, quote(orig), quote(target)))
- os.system(cmd)
-
- def dirdiff(self, target, mode=1, diffopts="", need_original=True):
- assert isdir(target)
- names = self.metadata.getnames(target)
- for name in names:
- t = join(target, name)
- e = self.metadata.getentry(t)
- if e and (("flag" not in e) or not need_original):
- self.diff(t, mode, diffopts, need_original)
-
- def add(self, path, type=None, factory=None):
- entry = self.basicadd(path, type, factory)
- head, tail = fsutil.split(path)
- pentry = self.metadata.getentry(head)
- if not pentry:
- raise Error("can't add '%s': its parent is not registered", path)
- if "path" not in pentry:
- raise Error("can't add '%s': its parent has no 'path' key", path)
- zpath = fsutil.encode(pentry["path"])
- if not zpath.endswith("/"):
- zpath += "/"
- zpath += tail
- entry["path"] = zpath
- self.metadata.flush()
- if isdir(path):
- # Force Entries.xml to exist, even if it wouldn't normally
- zopedir = join(path, "@@Zope")
- efile = join(zopedir, "Entries.xml")
- if not exists(efile):
- if not exists(zopedir):
- os.makedirs(zopedir)
- self.network.writefile(dump_entries({}), efile)
- print "A", join(path, "")
- else:
- print "A", path
-
- def basicadd(self, path, type=None, factory=None):
- if not exists(path):
- raise Error("nothing known about '%s'", path)
- entry = self.metadata.getentry(path)
- if entry:
- raise Error("path '%s' is already registered", path)
- entry["flag"] = "added"
- if type:
- entry["type"] = type
- if factory:
- entry["factory"] = factory
- return entry
-
- def copy(self, src, dst=None, children=True):
- if not exists(src):
- raise Error("%s does not exist" % src)
- dst = dst or ''
- if (not dst) or isdir(dst):
- target_dir = dst
- target_name = basename(os.path.abspath(src))
- else:
- target_dir, target_name = os.path.split(dst)
- if target_dir:
- if not exists(target_dir):
- raise Error("destination directory does not exist: %r"
- % target_dir)
- if not isdir(target_dir):
- import errno
- err = IOError(errno.ENOTDIR, "Not a directory", target_dir)
- raise Error(str(err))
- if not self.metadata.getentry(target_dir):
- raise Error("nothing known about '%s'" % target_dir)
- srcentry = self.metadata.getentry(src)
- from zope.fssync import copier
- if srcentry:
- # already known to fssync; we need to deal with metadata,
- # Extra, and Annotations
- copier = copier.ObjectCopier(self)
- else:
- copier = copier.FileCopier(self)
- copier.copy(src, join(target_dir, target_name), children)
-
- def mkdir(self, path):
- dir, name = split(path)
- if dir:
- if not exists(dir):
- raise Error("directory %r does not exist" % dir)
- if not isdir(dir):
- raise Error("%r is not a directory" % dir)
- else:
- dir = os.curdir
- entry = self.metadata.getentry(dir)
- if not entry:
- raise Error("know nothing about container for %r" % path)
- if exists(path):
- raise Error("%r already exists" % path)
- os.mkdir(path)
- self.add(path)
-
- def remove(self, path):
- if exists(path):
- raise Error("'%s' still exists", path)
- entry = self.metadata.getentry(path)
- if not entry:
- raise Error("nothing known about '%s'", path)
- zpath = entry.get("path")
- if not zpath:
- raise Error("can't remote '%s': its zope path is unknown", path)
- if entry.get("flag") == "added":
- entry.clear()
- else:
- entry["flag"] = "removed"
- self.metadata.flush()
- print "R", path
-
- def status(self, target, descend_only=False):
- entry = self.metadata.getentry(target)
- flag = entry.get("flag")
- if isfile(target):
- if not entry:
- if not self.fsmerger.ignore(target):
- print "?", target
- elif flag == "added":
- print "A", target
- elif flag == "removed":
- print "R(reborn)", target
- else:
- original = fsutil.getoriginal(target)
- if isfile(original):
- if filecmp.cmp(target, original):
- print "=", target
- else:
- print "M", target
- else:
- print "M(lost-original)", target
- elif isdir(target):
- pname = join(target, "")
- if not entry:
- if not descend_only and not self.fsmerger.ignore(target):
- print "?", pname
- elif flag == "added":
- print "A", pname
- elif flag == "removed":
- print "R(reborn)", pname
- else:
- print "/", pname
- if entry:
- # Recurse down the directory
- namesdir = {}
- for name in fsutil.listdir(target):
- ncname = normcase(name)
- if ncname != fsutil.nczope:
- namesdir[ncname] = name
- for name in self.metadata.getnames(target):
- ncname = normcase(name)
- namesdir[ncname] = name
- ncnames = namesdir.keys()
- ncnames.sort()
- for ncname in ncnames:
- self.status(join(target, namesdir[ncname]))
- elif exists(target):
- if not entry:
- if not self.fsmerger.ignore(target):
- print "?", target
- elif flag:
- print flag[0].upper() + "(unrecognized)", target
- else:
- print "M(unrecognized)", target
- else:
- if not entry:
- print "nonexistent", target
- elif flag == "removed":
- print "R", target
- elif flag == "added":
- print "A(lost)", target
- else:
- print "lost", target
- annotations = fsutil.getannotations(target)
- if isdir(annotations):
- self.status(annotations, True)
- extra = fsutil.getextra(target)
- if isdir(extra):
- self.status(extra, True)
-
-def quote(s):
- """Helper to put quotes around arguments passed to shell if necessary."""
- if os.name == "posix":
- meta = "\\\"'*?[&|()<>`#$; \t\n"
- else:
- meta = " "
- needquotes = False
- for c in meta:
- if c in s:
- needquotes = True
- break
- if needquotes:
- if os.name == "posix":
- # use ' to quote, replace ' by '"'"'
- s = "'" + s.replace("'", "'\"'\"'") + "'"
- else:
- # (Windows) use " to quote, replace " by ""
- s = '"' + s.replace('"', '""') + '"'
- return s
Modified: zope.fssync/trunk/src/zope/fssync/fsutil.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/fsutil.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/fsutil.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -147,15 +147,6 @@
if encoding is None:
encoding = fsencoding
if isinstance(path, unicode):
- return normalize(path).encode(encoding)
+ return path.encode(encoding)
return unicode(path, encoding=fsencoding).encode(encoding)
-def listdir(path):
- """Returns normalized filenames on OS X (see normalize above).
-
- The standard file and os.path operations seem to work with both
- encodings on OS X. Therefore we provide our own listdir, making sure
- that the more common NFC encoding is used.
- """
- return [normalize(name) for name in os.listdir(path)]
-
Added: zope.fssync/trunk/src/zope/fssync/interfaces.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/interfaces.py (rev 0)
+++ zope.fssync/trunk/src/zope/fssync/interfaces.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -0,0 +1,402 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Interfaces for filesystem synchronization.
+
+$Id: interfaces.py 73003 2007-03-06 10:34:19Z oestermeier $
+"""
+__docformat__ = "reStructuredText"
+
+from zope import interface
+from zope import component
+from zope import schema
+from zope import lifecycleevent
+
+class ISynchronizableExtras(interface.common.mapping.IMapping):
+ """A mapping of selected object attributes."""
+
+
+class ISynchronizableAnnotations(interface.common.mapping.IMapping):
+ """A mapping of synchronizable annotation namespaces."""
+
+ def modify(target):
+ """Modifies the target annotations.
+
+ Transfers the synchronizable namespaces to the target annotations.
+ Returns a lifecycleevent.interfaces.IModificationDescription
+ if changes were detected, None otherwise.
+ """
+
+
+class IObjectSynchronized(lifecycleevent.interfaces.IModificationDescription):
+ """An unspecific modification description.
+
+ Basically says that an object has changed during a sync
+ operation. If you can say more specific things you should
+ use other modification descriptions.
+ """
+
+class IRepository(interface.Interface):
+ """A target system that stores objects as files or directories."""
+
+ chunk_size = schema.Int(
+ title=u"Chunk Size",
+ description=u"The chunk size.",
+ default=32768)
+
+ case_insensitive = schema.Bool(
+ title=u"Case Insensitive",
+ description=u"Is this repository case insensitive?",
+ default=False)
+
+ def getMetadata():
+ """Returns a metadata database for the repository.
+ """
+
+ def disambiguate(dirpath, name):
+ """Disambiguates a name in a directory.
+ """
+
+ def dirname(path):
+ """Returns the dirname."""
+
+ def join(path, *names):
+ """Returns a joined path."""
+
+ def normalize(name):
+ """Normalize a filename.
+ """
+
+ def encode(path, encoding=None):
+ """Encodes a path in its normalized form."""
+
+ def writeable(path):
+ """Returns a writeable file handler."""
+
+ def readable(path):
+ """Returns a readable file handler."""
+
+
+class IPickler(interface.Interface):
+ """A pickler."""
+
+ def dump(writeable):
+ """Dumps a picklable object to a writeable file-like object."""
+
+
+class IUnpickler(interface.Interface):
+ """An unpickler."""
+
+ def load(readable):
+ """Loads a pickled object from a readable file-like object."""
+
+
+class IEntryId(interface.Interface):
+ """Returns an id that can be saved in a metadata database.
+
+ The id must be 'stringifiable'.
+ """
+
+ def __str__():
+ """Returns a string representation.
+
+ The encoding should be 'UTF-8'.
+ """
+
+
+class IPersistentIdGenerator(interface.Interface):
+ """Generates picklable persistent references."""
+
+ def id(self, obj):
+ """Returns a persistent reference."""
+
+
+class IPersistentIdLoader(interface.Interface):
+
+ def load(self, id):
+ """Resolves a persistent reference."""
+
+
+class IWriteable(interface.Interface):
+ """A writeable file handle."""
+
+ def write(data):
+ """Writes the data."""
+
+ def close():
+ """Closes the file-like object.
+
+ Ensures that pending data are written.
+ """
+
+
+class IReadable(interface.Interface):
+ """A readable file handle."""
+
+ def read(bytes=None):
+ """Reads the number of bytes or all data if bytes is None."""
+
+ def close():
+ """Closes the file handle."""
+
+
+class ISyncTask(interface.Interface):
+ """Base interface for ICheckout, ICommit, and ICheck.
+
+ The repository may be a filesystem, an archive, a database,
+ or something else that is able to store serialized data.
+ """
+
+ repository = schema.Object(
+ IRepository,
+ title=u"Repository",
+ description=u"The repository that contains the serialized data.")
+
+ context = schema.Object(
+ interface.Interface,
+ title=u"Context",
+ description=u"Context of reference")
+
+ def __init__(getSynchronizer, repository, context=None):
+ """Inits the task with a getSynchronizer lookup function,
+ a repository, and an optional context.
+ """
+
+
+class ICheckout(ISyncTask):
+ """Checkout objects from a content space to a repository.
+ """
+
+ def perform(obj, name, location=''):
+ """Check an object out to the repository.
+
+ obj -- The object to be checked out
+
+ name -- The name of the object
+
+ location -- The directory or path where the object will go
+ """
+
+
+class ICheckin(ISyncTask):
+ """Import objects from the repository to a content space.
+ """
+
+ def perform(obj, name, location=''):
+ """Performs a checkin.
+
+ obj -- The object to be checked in
+
+ name -- The name of the object
+
+ location -- The location where the object will go
+
+ Raises a ``SynchronizationError`` if the object
+ already exists at the given location.
+ """
+
+
+class ICheck(ISyncTask):
+ """Check that the repository is consistent with the object database."""
+
+ def perform(container, name, fspath):
+ """Compare an object or object tree from a repository.
+
+ If the originals in the repository are not up to date, errors
+ are reported by an errors() call.
+
+ Invalid object names are reported by raising
+ ``SynchronizationError``.
+ """
+
+ def errors():
+ """Returns a list of paths with errors."""
+
+
+class ICommit(ISyncTask):
+ """Commits a repository to a content space."""
+
+ def perform(container, name, fspath, context=None):
+ """Synchronize an object or object tree from a repository.
+ """
+
+
+class IFileSystemRepository(IRepository):
+ """A filesystem repository.
+
+ Stores the data in a directory tree on the filesystem.
+ """
+
+
+class IArchiveRepository(IRepository):
+ """A repository that stores the data in a single file."""
+
+ def iterPaths():
+ """Iterates over all paths in the archive."""
+
+
+class ISynchronizerFactory(component.interfaces.IFactory):
+ """A factory for synchronizers, i.e. serializers/de-serializers.
+
+ The factory should be registered as a named utility with
+ the dotted name of the adapted class as the lookup key.
+
+ The default factory should be registered without a name.
+
+ The call of the factory should return
+
+ - an `IDirectorySynchronizer` adapter for the object if the
+ object is represented as a directory.
+
+ - an `IFileSynchronizer` adapter for the object if the
+ object is represented as a file.
+ """
+
+
+class ISerializer(interface.Interface):
+ """Base interface for object serializers."""
+
+ def getObject():
+ """Return the serializable entry."""
+
+ def metadata():
+ """Returns a mapping with metadata.
+
+ The keys must be attribute names, the values utf-8 encoded strings.
+ """
+
+ def annotations():
+ """Return annotations for the entry.
+
+ Returns None if the serializer provides
+ its own representation
+ """
+
+ def extras():
+ """Return extra data for the entry.
+
+ Returns None if the serializer provides its own
+ representation of extras.
+ """
+
+
+class IDeserializer(interface.Interface):
+ """The inverse operator of an ISerializer.
+
+ Deserializer consume serialized data and provide
+ write access to parts of the deserialized objects.
+ """
+
+ def setmetadata(metadata):
+ """Sets entry metadata.
+
+ Returns a lifecycleevent.interfaces.IModificationDescription
+ if relevant changes were detected, None otherwise.
+ """
+
+ def setannotations(annotations):
+ """Sets deserialized annotations.
+
+ Returns a lifecycleevent.interfaces.IModificationDescription
+ if relevant changes were detected, None otherwise.
+ """
+
+ def setextras(extras):
+ """Sets deserialized extra data.
+
+ Returns a lifecycleevent.interfaces.IModificationDescription
+ if relevant changes were detected, None otherwise.
+ """
+
+
+class ISynchronizer(ISerializer, IDeserializer):
+ """A base interface for synchronizers."""
+
+
+class IFileSerializer(ISerializer):
+ """Writes data to a file-like object."""
+
+ def dump(writeable):
+ """Dump the file content to a writeable file handle."""
+
+
+class IFileDeserializer(IDeserializer):
+ """Reads data from a file-like object."""
+
+ def load(readable):
+ """Reads serialized file content."""
+
+
+class IFileSynchronizer(IFileSerializer, IFileDeserializer):
+ """A synchronizer for file-like objects."""
+
+
+class IDefaultSynchronizer(IFileSynchronizer):
+ """A serializer that uses an IPickler."""
+
+
+class IDirectorySerializer(ISerializer):
+ """Provides access to a directory listing."""
+
+ def __getitem__(key, value):
+ """Gets an item."""
+
+ def iteritems():
+ """Return an iterable directory listing of name, obj tuples."""
+
+
+class IDirectoryDeserializer(IDeserializer):
+ """Writes deserialized data into a directory-like object."""
+
+ def __setitem__(key, value):
+ """Sets an item."""
+
+ def __delitem__(key):
+ """Deletes an item."""
+
+
+class IDirectorySynchronizer(ISynchronizer,
+ IDirectorySerializer, IDirectoryDeserializer):
+ """A synchronizer for directory-like objects."""
+
+ def traverseName(name):
+ """Traverses the name."""
+
+
+class IObjectGenerator(interface.Interface):
+ """A generator for objects with a special create protocol."""
+
+ def create(context, name):
+ """Creates the object in the given context."""
+
+
+class IFileGenerator(interface.Interface):
+ """A generator that applies if no other file deserializers can be found."""
+
+ def create(context, readable, extension=None):
+ """Creates a new file object and initializes it with the readable data.
+
+ Uses the optional file extension to determine the file type.
+ """
+
+ def load(file, readable):
+ """Consumes readable data for the generated file."""
+
+
+class IDirectoryGenerator(IObjectGenerator):
+ """A generator that applies if no other directory factories can be found."""
+
+
+def getSynchronizer(obj):
+ """Returns the class based synchronizer or the default synchronizer."""
+
Deleted: zope.fssync/trunk/src/zope/fssync/main.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/main.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/main.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -1,396 +0,0 @@
-#! /usr/bin/env python
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Filesystem synchronization utility for Zope 3.
-
-Command line syntax summary:
-
-%(program)s add [options] PATH ...
-%(program)s checkin [options] URL [TARGETDIR]
-%(program)s checkout [options] URL [TARGETDIR]
-%(program)s commit [options] [TARGET ...]
-%(program)s copy [options] SOURCE [TARGET]
-%(program)s diff [options] [TARGET ...]
-%(program)s login [options] URL
-%(program)s logout [options] URL
-%(program)s mkdir PATH ...
-%(program)s remove [options] TARGET ...
-%(program)s resolve PATH ...
-%(program)s revert PATH ...
-%(program)s status [TARGET ...]
-%(program)s update [TARGET ...]
-
-``%(program)s help'' prints the global help (this message)
-``%(program)s help command'' prints the local help for the command
-"""
-"""
-$Id$
-"""
-
-import os
-import urlparse
-
-from zope.fssync.command import Command, Usage
-from zope.fssync.fssync import FSSync
-from zope.fssync import fsutil
-
-def main():
- """Main program.
-
- The return value is the suggested sys.exit() status code:
- 0 or None for success
- 2 for command line syntax errors
- 1 or other for later errors
- """
- cmd = Command(usage=__doc__)
- for func, aliases, short, long in command_table:
- cmd.addCommand(func.__name__, func, short, long, aliases)
-
- return cmd.main()
-
-def checkout(opts, args):
- """%(program)s checkout [-u user] URL [TARGETDIR]
-
- URL should be of the form ``http://user:password@host:port/path''.
- Only http and https are supported (and https only where Python has
- been built to support SSL). This should identify a Zope 3 server;
- user:password should have management privileges; /path should be
- the traversal path to an existing object, not including views or
- skins. The user may be specified using the -u option instead of
- encoding it in the URL, since the URL syntax for username and
- password isn't so well known. The password may be omitted; if so,
- an authentication token stored using '%(program)s login' will be
- used if available; otherwise you will be propted for the password.
-
- TARGETDIR should be a directory; if it doesn't exist, it will be
- created. The object tree rooted at /path is copied to a
- subdirectory of TARGETDIR whose name is the last component of
- /path. TARGETDIR defaults to the current directory. A metadata
- directory named @@Zope is also created in TARGETDIR.
- """
- if not args:
- raise Usage("checkout requires a URL argument")
- rooturl = args[0]
- if len(args) > 1:
- target = args[1]
- if len(args) > 2:
- raise Usage("checkout requires at most one TARGETDIR argument")
- else:
- target = os.curdir
- user = _getuseroption(opts)
- if user:
- parts = list(urlparse.urlsplit(rooturl))
- netloc = parts[1]
- if "@" in netloc:
- user_passwd, host_port = netloc.split("@", 1)
- if ":" in user_passwd:
- u, p = user_passwd.split(":", 1)
- else:
- u = user_passwd
- # only scream if the -u option and the URL disagree:
- if u != user:
- raise Usage("-u/--user option and URL disagree on user name")
- else:
- # no username in URL; insert
- parts[1] = "%s@%s" % (user, netloc)
- rooturl = urlparse.urlunsplit(tuple(parts))
- fs = FSSync(rooturl=rooturl)
- fs.checkout(target)
-
-def commit(opts, args):
- """%(program)s commit [-m message] [-r] [TARGET ...]
-
- Commit the TARGET files or directories to the Zope 3 server
- identified by the checkout command. TARGET defaults to the
- current directory. Each TARGET is committed separately. Each
- TARGET should be up-to-date with respect to the state of the Zope
- 3 server; if not, a detailed error message will be printed, and
- you should use the update command to bring your working directory
- in sync with the server.
-
- The -m option specifies a message to label the transaction.
- The default message is 'fssync_commit'.
- """
- message, opts = extract_message(opts, "commit")
- raise_on_conflicts = False
- for o, a in opts:
- if o in ("-r", "--raise-on-conflicts"):
- raise_on_conflicts = True
- fs = FSSync()
- fs.multiple(args, fs.commit, message, raise_on_conflicts)
-
-def update(opts, args):
- """%(program)s update [TARGET ...]
-
- Bring the TARGET files or directories in sync with the
- corresponding objects on the Zope 3 server identified by the
- checkout command. TARGET defaults to the current directory. Each
- TARGET is updated independently. This command will merge your
- changes with changes made on the server; merge conflicts will be
- indicated by diff3 markings in the file and noted by a 'C' in the
- update output.
- """
- fs = FSSync()
- fs.multiple(args, fs.update)
-
-def add(opts, args):
- """%(program)s add [-t TYPE] [-f FACTORY] TARGET ...
-
- Add the TARGET files or directories to the set of registered
- objects. Each TARGET must exist. The next commit will add them
- to the Zope 3 server.
-
- The options -t and -f can be used to set the type and factory of
- the newly created object; these should be dotted names of Python
- objects. Usually only the factory needs to be specified.
-
- If no factory is specified, the type will be guessed when the
- object is inserted into the Zope 3 server based on the filename
- extension and the contents of the data. For example, some common
- image types are recognized by their contents, and the extensions
- .pt and .dtml are used to create page templates and DTML
- templates, respectively.
- """
- type = None
- factory = None
- for o, a in opts:
- if o in ("-t", "--type"):
- type = a
- elif o in ("-f", "--factory"):
- factory = a
- if not args:
- raise Usage("add requires at least one TARGET argument")
- fs = FSSync()
- for a in args:
- fs.add(a, type, factory)
-
-def copy(opts, args):
- """%(program)s copy [-l | -R] SOURCE [TARGET]
-
- """
- recursive = None
- for o, a in opts:
- if o in ("-l", "--local"):
- if recursive:
- raise Usage("%r conflicts with %r" % (o, recursive))
- recursive = False
- elif o in ("-R", "--recursive"):
- if recursive is False:
- raise Usage("%r conflicts with -l" % o)
- recursive = o
- if not args:
- raise Usage("copy requires at least one argument")
- if len(args) > 2:
- raise Usage("copy allows at most two arguments")
- source = args[0]
- if len(args) == 2:
- target = args[1]
- else:
- target = None
- if recursive is None:
- recursive = True
- else:
- recursive = bool(recursive)
- fs = FSSync()
- fs.copy(source, target, children=recursive)
-
-def remove(opts, args):
- """%(program)s remove TARGET ...
-
- Remove the TARGET files or directories from the set of registered
- objects. No TARGET must exist. The next commit will remove them
- from the Zope 3 server.
- """
- if not args:
- raise Usage("remove requires at least one TARGET argument")
- fs = FSSync()
- for a in args:
- fs.remove(a)
-
-diffflags = ["-b", "-B", "--brief", "-c", "-C", "--context",
- "-i", "-u", "-U", "--unified"]
-def diff(opts, args):
- """%(program)s diff [diff_options] [TARGET ...]
-
- Write a diff listing for the TARGET files or directories to
- standard output. This shows the differences between the working
- version and the version seen on the server by the last update.
- Nothing is printed for files that are unchanged from that version.
- For directories, a recursive diff is used.
-
- Various GNU diff options can be used, in particular -c, -C NUMBER,
- -u, -U NUMBER, -b, -B, --brief, and -i.
- """
- diffopts = []
- mode = 1
- need_original = True
- for o, a in opts:
- if o == '-1':
- mode = 1
- elif o == '-2':
- mode = 2
- elif o == '-3':
- mode = 3
- elif o == '-N':
- need_original = False
- elif o in diffflags:
- if a:
- diffopts.append(o + " " + a)
- else:
- diffopts.append(o)
- diffopts = " ".join(diffopts)
- fs = FSSync()
- fs.multiple(args, fs.diff, mode, diffopts, need_original)
-
-def status(opts, args):
- """%(program)s status [TARGET ...]
-
- Print brief (local) status for each target, without changing any
- files or contacting the Zope server.
- """
- fs = FSSync()
- fs.multiple(args, fs.status)
-
-def checkin(opts, args):
- """%(program)s checkin [-m message] URL [TARGETDIR]
-
- URL should be of the form ``http://user:password@host:port/path''.
- Only http and https are supported (and https only where Python has
- been built to support SSL). This should identify a Zope 3 server;
- user:password should have management privileges; /path should be
- the traversal path to a non-existing object, not including views
- or skins.
-
- TARGETDIR should be a directory; it defaults to the current
- directory. The object tree rooted at TARGETDIR is copied to
- /path. subdirectory of TARGETDIR whose name is the last component
- of /path.
- """
- message, opts = extract_message(opts, "checkin")
- if not args:
- raise Usage("checkin requires a URL argument")
- rooturl = args[0]
- if len(args) > 1:
- target = args[1]
- if len(args) > 2:
- raise Usage("checkin requires at most one TARGETDIR argument")
- else:
- target = os.curdir
- fs = FSSync(rooturl=rooturl)
- fs.checkin(target, message)
-
-def login(opts, args):
- """%(program)s login [-u user] [URL]
-
- Save a basic authentication token for a URL that doesn't include a
- password component.
- """
- _loginout(opts, args, "login", FSSync().login)
-
-def logout(opts, args):
- """%(program)s logout [-u user] [URL]
-
- Remove a saved basic authentication token for a URL.
- """
- _loginout(opts, args, "logout", FSSync().logout)
-
-def _loginout(opts, args, cmdname, cmdfunc):
- url = user = None
- if args:
- if len(args) > 1:
- raise Usage("%s allows at most one argument" % cmdname)
- url = args[0]
- user = _getuseroption(opts)
- cmdfunc(url, user)
-
-def _getuseroption(opts):
- user = None
- for o, a in opts:
- if o in ("-u", "--user"):
- if user:
- raise Usage("-u/--user may only be specified once")
- user = a
- return user
-
-def mkdir(opts, args):
- """%(program)s mkdir PATH ...
-
- Create new directories in directories that are already known to
- %(program)s and schedule the new directories for addition.
- """
- fs = FSSync()
- fs.multiple(args, fs.mkdir)
-
-def resolve(opts, args):
- """%(program)s resolve [PATH ...]
-
- Clear any conflict markers associated with PATH. This would allow
- commits to proceed for the relevant files.
- """
- fs = FSSync()
- fs.multiple(args, fs.resolve)
-
-def revert(opts, args):
- """%(program)s revert [TARGET ...]
-
- Revert changes to targets. Modified files are overwritten by the
- unmodified copy cached in @@Zope/Original/ and scheduled additions
- and deletions are de-scheduled. Additions that are de-scheduled
- do not cause the working copy of the file to be removed.
- """
- fs = FSSync()
- fs.multiple(args, fs.revert)
-
-def extract_message(opts, cmd):
- L = []
- message = None
- msgfile = None
- for o, a in opts:
- if o in ("-m", "--message"):
- if message:
- raise Usage(cmd + " accepts at most one -m/--message option")
- message = a
- elif o in ("-F", "--file"):
- if msgfile:
- raise Usage(cmd + " accepts at most one -F/--file option")
- msgfile = a
- else:
- L.append((o, a))
- if not message:
- if msgfile:
- message = open(msgfile).read()
- else:
- message = "fssync_" + cmd
- elif msgfile:
- raise Usage(cmd + " requires at most one of -F/--file or -m/--message")
- return message, L
-
-command_table = [
- # name is taken from the function name
- # function, aliases, short opts, long opts
- (add, "", "f:t:", "factory= type="),
- (checkin, "", "F:m:", "file= message="),
- (checkout, "co", "u:", "user="),
- (commit, "ci", "F:m:r", "file= message= raise-on-conflicts"),
- (copy, "cp", "lR", "local recursive"),
- (diff, "di", "bBcC:iNuU:", "brief context= unified="),
- (login, "", "u:", "user="),
- (logout, "", "u:", "user="),
- (mkdir, "", "", ""),
- (remove, "del delete rm", "", ""),
- (resolve, "", "", ""),
- (revert, "", "", ""),
- (status, "stat st", "", ""),
- (update, "up", "", ""),
- ]
Modified: zope.fssync/trunk/src/zope/fssync/merger.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/merger.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/merger.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -142,7 +142,11 @@
return None
def merge_files_copy(self, local, original, remote):
- shutil.copy(remote, local)
+ try:
+ shutil.copy(remote, local)
+ except IOError, msg:
+ import pdb; pdb.set_trace()
+
fsutil.ensuredir(dirname(original))
shutil.copy(remote, original)
self.getentry(local).update(self.getentry(remote))
@@ -204,15 +208,13 @@
"""
-
-
lmeta = self.getentry(local)
rmeta = self.getentry(remote)
-
+
# Special-case sticky conflict
if "conflict" in lmeta:
return ("Nothing", "Conflict")
-
+
# Sort out cases involving additions or removals
if not lmeta and not rmeta:
@@ -234,7 +236,10 @@
return ("Fix", "Uptodate")
else:
# CVS would say "move local file out of the way"
- return ("Merge", "Modified")
+ if rmeta.get("binary") == "true" or lmeta.get("binary") == "true":
+ return ("Nothing", "Conflict")
+ else:
+ return ("Merge", "Modified")
if rmeta and not lmeta:
# Added remotely
@@ -284,7 +289,10 @@
return ("Fix", "Uptodate")
else:
# Changes on both sides, three-way merge needed
- return ("Merge", "Modified")
+ if rmeta.get("binary") == "true" or lmeta.get("binary") == "true":
+ return ("Nothing", "Conflict")
+ else:
+ return ("Merge", "Modified")
def cmpfile(self, file1, file2):
"""Helper to compare two files.
Modified: zope.fssync/trunk/src/zope/fssync/metadata.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/metadata.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/metadata.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -85,6 +85,11 @@
else:
raise IOError(tuple(errors))
+ def added(self):
+ """Adds an 'added' flag to all metadata entries."""
+ for dirinfo in self.cache.itervalues():
+ for entry in dirinfo.entries.itervalues():
+ entry["flag"] = "added"
class DirectoryManager(object):
def __init__(self, dir):
Deleted: zope.fssync/trunk/src/zope/fssync/passwd.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/passwd.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/passwd.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -1,196 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Password manager for fssync clients.
-
-$Id$
-"""
-
-import base64
-import httplib
-import os
-
-from cStringIO import StringIO
-
-
-DEFAULT_FILENAME = os.path.expanduser(os.path.join("~", ".zsyncpass"))
-
-
-class PasswordManager(object):
- """Manager for a cache of basic authentication tokens for zsync.
-
- This stores tokens in a file, and allows them to be retrieved by
- the zsync application. The tokens are stored in their 'cooked'
- form, so while someone could easily decode them or use them to
- make requests, the casual reader won't be able to use them easily.
-
- The cache file is created with restricted permissions, so other
- users should not be able to read it unless the permissions are
- modified.
- """
-
- def __init__(self, filename=None):
- if not filename:
- filename = DEFAULT_FILENAME
- self.authfile = filename
-
- def getPassword(self, user, host_port):
- """Read a password from the user."""
- import getpass
- prompt = "Password for %s at %s: " % (user, host_port)
- return getpass.getpass(prompt)
-
- def createToken(self, user_passwd):
- """Generate a basic authentication token from 'user:password'."""
- return base64.encodestring(user_passwd).strip()
-
- def getToken(self, scheme, host_port, user):
- """Get an authentication token for the user for a specific server.
-
- If a corresponding token exists in the cache, that is retured,
- otherwise the user is prompted for their password and a new
- token is generated. A new token is not automatically stored
- in the cache.
- """
- host_port = _normalize_host(scheme, host_port)
- prefix = [scheme, host_port, user]
-
- if os.path.exists(self.authfile):
- f = open(self.authfile, "r")
- try:
- for line in f:
- line = line.strip()
- if line[:1] in ("#", ""):
- continue
- parts = line.split()
- if parts[:3] == prefix:
- return parts[3]
- finally:
- f.close()
-
- # not in ~/.zsyncpass
- pw = self.getPassword(user, host_port)
- user_passwd = "%s:%s" % (user, pw)
- return self.createToken(user_passwd)
-
- def addToken(self, scheme, host_port, user, token):
- """Add a token to the persistent cache.
-
- If a corresponding token already exists in the cache, it is
- replaced.
- """
- host_port = _normalize_host(scheme, host_port)
- record = "%s %s %s %s\n" % (scheme, host_port, user, token)
-
- if os.path.exists(self.authfile):
- prefix = [scheme, host_port, user]
- f = open(self.authfile)
- sio = StringIO()
- found = False
- for line in f:
- parts = line.split()
- if parts[:3] == prefix:
- sio.write(record)
- found = True
- else:
- sio.write(line)
- f.close()
- if not found:
- sio.write(record)
- text = sio.getvalue()
- else:
- text = record
- f = self.createAuthFile()
- f.write(text)
- f.close()
-
- def removeToken(self, scheme, host_port, user):
- """Remove a token from the authentication database.
-
- Returns True if a token was found and removed, or False if no
- matching token was found.
-
- If the resulting cache file contains only blank lines, it is
- removed.
- """
- if not os.path.exists(self.authfile):
- return False
- host_port = _normalize_host(scheme, host_port)
- prefix = [scheme, host_port, user]
- found = False
- sio = StringIO()
- f = open(self.authfile)
- nonblank = False
- for line in f:
- parts = line.split()
- if parts[:3] == prefix:
- found = True
- else:
- if line.strip():
- nonblank = True
- sio.write(line)
- f.close()
- if found:
- if nonblank:
- text = sio.getvalue()
- f = self.createAuthFile()
- f.write(text)
- f.close()
- else:
- # nothing left in the file but blank lines; remove it
- os.unlink(self.authfile)
- return found
-
- def createAuthFile(self):
- """Create the token cache file with the right permissions."""
- new = not os.path.exists(self.authfile)
- if os.name == "posix":
- old_umask = os.umask(0077)
- try:
- f = open(self.authfile, "w", 0600)
- finally:
- os.umask(old_umask)
- else:
- f = open(self.authfile, "w")
- if new:
- f.write(_NEW_FILE_HEADER)
- return f
-
-_NEW_FILE_HEADER = """\
-#
-# Stored authentication tokens for zsync.
-# Manipulate this data using the 'zsync login' and 'zsync logout';
-# read the zsync documentation for more information.
-#
-"""
-
-def _normalize_host(scheme, host_port):
- if scheme == "http":
- return _normalize_port(host_port, httplib.HTTP_PORT)
- elif scheme == "https":
- return _normalize_port(host_port, httplib.HTTPS_PORT)
- else:
- raise fsutil.Error("unsupported URL scheme: %r" % scheme)
-
-def _normalize_port(host_port, default_port):
- if ":" in host_port:
- host, port = host_port.split(":", 1)
- try:
- port = int(port)
- except ValueError:
- raise fsutil.Error("invalid port specification: %r" % port)
- if port <= 0:
- raise fsutil.Error("invalid port: %d" % port)
- if port == default_port:
- host_port = host
- return host_port.lower()
Added: zope.fssync/trunk/src/zope/fssync/pickle.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/pickle.py (rev 0)
+++ zope.fssync/trunk/src/zope/fssync/pickle.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -0,0 +1,296 @@
+##############################################################################
+#
+# Copyright (c) 2007 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Pickle support functions for fssync.
+
+Defines a standard pickle format and an XML variant thereof.
+The pickles preserve persistent references.
+
+The default implementation generates pickles that understand their location in
+the object tree without causing the entire tree to be stored in the
+pickle. Persistent objects stored inside the outermost object are
+stored entirely in the pickle, and objects stored outside the outermost
+object but referenced from within are stored as persistent references.
+The parent of the outermost object is treated specially so that the
+pickle can be 'unpacked' with a new parent to create a copy in the new
+location; unpacking a pickle containing a parent reference requires
+passing an object to use as the parent as the second argument to the
+`loads()` function. The name of the outermost object is not stored in
+the pickle unless it is stored in the object.
+
+>>> from zope.location.tests import TLocation
+>>> root = TLocation()
+>>> interface.directlyProvides(root, IContainmentRoot)
+>>> o1 = DataLocation('o1', root, 12)
+>>> o2 = DataLocation('o2', root, 24)
+>>> o3 = DataLocation('o3', o1, 36)
+>>> o4 = DataLocation('o4', o3, 48)
+>>> o1.foo = o2
+
+We must register the path id generator and loader:
+
+>>> component.provideAdapter(PathPersistentIdGenerator)
+>>> component.provideAdapter(PathPersistentLoader)
+
+>>> s = StandardPickler(o1).dumps()
+>>> unpickler = StandardUnpickler(o1.__parent__)
+>>> c1 = unpickler.loads(s)
+>>> c1 is not o1
+1
+>>> c1.data == o1.data
+1
+>>> c1.__parent__ is o1.__parent__
+1
+>>> c1.foo is o2
+1
+>>> c3 = c1.o3
+>>> c3 is o3
+0
+>>> c3.__parent__ is c1
+1
+>>> c3.data == o3.data
+1
+>>> c4 = c3.o4
+>>> c4 is o4
+0
+>>> c4.data == o4.data
+1
+>>> c4.__parent__ is c3
+1
+
+See README.txt for an example how to replace paths by different ids.
+
+
+$Id: pickle.py 73003 2007-03-06 10:34:19Z oestermeier $
+"""
+__docformat__ = 'restructuredtext'
+
+from cStringIO import StringIO
+from cPickle import Unpickler
+
+from zope import component
+from zope import interface
+from zope import traversing
+from zope import location
+
+from zope.location.interfaces import ILocation
+from zope.location.traversing import LocationPhysicallyLocatable
+from zope.location.tests import TLocation
+from zope.traversing.interfaces import ITraverser
+from zope.traversing.interfaces import IContainmentRoot
+
+from zope.xmlpickle import xmlpickle
+
+import interfaces
+
+PARENT_MARKER = ".."
+
def getPath(obj):
    """Return the traversal path of *obj*, UTF-8 encoded."""
    locatable = LocationPhysicallyLocatable(obj)
    return locatable.getPath().encode('utf-8')
+
class PathPersistentIdGenerator(object):
    """Uses traversal paths as persistent ids.

    >>> from zope.location.tests import TLocation
    >>> root = TLocation()
    >>> interface.directlyProvides(root, IContainmentRoot)
    >>> o1 = TLocation(); o1.__parent__ = root; o1.__name__ = 'o1'
    >>> o2 = TLocation(); o2.__parent__ = root; o2.__name__ = 'o2'
    >>> o3 = TLocation(); o3.__parent__ = o1; o3.__name__ = 'o3'
    >>> root.o1 = o1
    >>> root.o2 = o2
    >>> o1.foo = o2
    >>> o1.o3 = o3

    >>> gen = PathPersistentIdGenerator(StandardPickler(o1))
    >>> gen.id(root)
    '..'
    >>> gen.id(o2)
    '/o2'
    >>> gen.id(o3)
    >>> gen.id(o1)

    >>> gen = PathPersistentIdGenerator(StandardPickler(o3))
    >>> gen.id(root)
    '/'

    """

    interface.implements(interfaces.IPersistentIdGenerator)
    component.adapts(interfaces.IPickler)

    # Containment root of the pickled object; stays None when the
    # object is not attached to a tree.
    root = None

    def __init__(self, pickler):
        # The pickler's context is the outermost object being dumped;
        # everything located inside it is pickled inline, everything
        # outside becomes a persistent reference (see id() below).
        self.pickler = pickler
        top = self.location = pickler.context
        self.parent = getattr(top, "__parent__", None)
        if ILocation.providedBy(top):
            try:
                self.root = LocationPhysicallyLocatable(top).getRoot()
            except TypeError:
                # Not attached to a containment root; leave self.root None.
                pass

    def id(self, object):
        """Return a persistent id for *object*, or None to pickle inline.

        Returns PARENT_MARKER for the direct parent of the pickled
        object and an absolute traversal path for other objects inside
        the same tree.  Raises ValueError for located objects that are
        outside the tree.
        """
        if self.parent is None:
            return None

        if ILocation.providedBy(object):
            if location.inside(object, self.location):
                return None
            elif object is self.parent:
                # emit special parent marker
                return PARENT_MARKER
            elif location.inside(object, self.root):
                return getPath(object)
            elif object.__parent__ is None:
                return None
            raise ValueError(
                "object implementing ILocation found outside tree")
        else:
            return None
+
+
class PathPersistentLoader(object):
    """Loads objects from paths.

    Uses path traversal if the context of the adapted
    Unpickler is locatable."""

    interface.implements(interfaces.IPersistentIdLoader)
    component.adapts(interfaces.IUnpickler)

    def __init__(self, unpickler):
        # The unpickler's context serves as the parent for references
        # stored as PARENT_MARKER.
        context = self.parent = unpickler.context
        self.root = None
        if ILocation.providedBy(context):
            locatable = LocationPhysicallyLocatable(context)
            __traceback_info__ = (context, locatable)
            try:
                self.root = locatable.getRoot()
            except TypeError:
                # Context is not attached to a containment root.
                pass
        if self.root is not None:
            # Bind the traversal function used to resolve absolute paths.
            traverser = traversing.interfaces.ITraverser(self.root)
            self.traverse = traverser.traverse

    def load(self, path):
        """Loads the object.

        Returns the context of the adapted Unpickler if a PARENT_MARKER
        is found.  Absolute paths ('/...') are resolved by traversal
        from the root; any other reference is rejected with ValueError.
        """

        if path == PARENT_MARKER:
            return self.parent
        if path[:1] == "/":
            # outside object:
            if path == "/":
                return self.root
            else:
                return self.traverse(path[1:])
        raise ValueError("unknown persistent object reference: %r" % path)
+
+
class StandardPickler(object):
    """Pickles its context in the standard pickle format.

    Persistent-id generation is delegated to an IPersistentIdGenerator
    adapter if one is registered.  Dict items are pickled in sorted
    order so that equal objects always produce identical pickles.
    """

    interface.implements(interfaces.IPickler)
    component.adapts(interface.Interface)

    def __init__(self, context):
        self.context = context

    def dump(self, writeable):
        """Write the pickle of the context to *writeable*."""
        sorting_pickler = xmlpickle._PicklerThatSortsDictItems(writeable, 0)
        id_generator = interfaces.IPersistentIdGenerator(self, None)
        if id_generator is not None:
            sorting_pickler.persistent_id = id_generator.id
        sorting_pickler.dump(self.context)

    def dumps(self):
        """Return the pickle of the context as a string."""
        buffer = StringIO()
        self.dump(buffer)
        return buffer.getvalue()
+
+
class StandardUnpickler(object):
    """Unpickles data written in the standard pickle format.

    Resolution of persistent references is delegated to an
    IPersistentIdLoader adapter if one is registered.
    """

    interface.implements(interfaces.IUnpickler)
    component.adapts(interface.Interface)

    def __init__(self, context):
        self.context = context

    def load(self, readable):
        """Unpickle and return one object from the *readable* stream."""
        unpickler = Unpickler(readable)
        id_loader = interfaces.IPersistentIdLoader(self, None)
        if id_loader is not None:
            unpickler.persistent_load = id_loader.load
        return unpickler.load()

    def loads(self, pickle):
        """Unpickle and return one object from the string *pickle*."""
        return self.load(StringIO(pickle))
+
+
class XMLPickler(StandardPickler):
    """A pickler producing the zope.xmlpickle XML format.

    Note: the complete standard pickle is buffered in memory before
    it is converted to XML.
    """

    interface.implements(interfaces.IPickler)
    component.adapts(interface.Interface)

    def dump(self, writeable):
        """Write the XML representation of the context's pickle."""
        buffer = StringIO()
        super(XMLPickler, self).dump(buffer)
        writeable.write(xmlpickle.toxml(buffer.getvalue()))
+
+
class XMLUnpickler(StandardUnpickler):
    """An unpickler for the XML format produced by XMLPickler.

    Note: the complete XML document is read into memory before it is
    converted back to a standard pickle and loaded.
    """

    def load(self, readable):
        """Convert the XML from *readable* to a pickle and load it."""
        data = xmlpickle.fromxml(readable.read())
        return super(XMLUnpickler, self).load(StringIO(data))
+
+
class DataLocation(TLocation):
    """Sample data container class used in doctests."""

    def __init__(self, name, parent, data):
        self.__name__ = name
        self.__parent__ = parent
        if parent is not None:
            # Register ourselves as an attribute of the parent so the
            # doctests can walk the tree via attribute access.
            setattr(parent, name, self)
        self.data = data
        super(DataLocation, self).__init__()
Added: zope.fssync/trunk/src/zope/fssync/repository.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/repository.py (rev 0)
+++ zope.fssync/trunk/src/zope/fssync/repository.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -0,0 +1,424 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Repositories for serialized data.
+
+$Id: repository.py 73003 2007-03-06 10:34:19Z oestermeier $
+"""
+import os, copy
+import os
+import sys
+import unicodedata
+
+import zope.interface
+
+import metadata
+import interfaces
+
+
+unwanted = ("", os.curdir, os.pardir)
+
class Repository(object):
    """Represents a repository that uses filepaths.

    Possible examples are filesystems, SNARF-archives,
    zip-archives, svn-repository, etc.

    This base class also handles case insensitive filenames
    and provides methods to resolve ambiguities.
    """

    zope.interface.implements(interfaces.IRepository)

    chunk_size = 32768  # number of bytes read per compare() step

    def __init__(self, case_insensitive=False, enforce_nfd=False, metadata=None):
        self.case_insensitive = case_insensitive
        self.enforce_nfd = enforce_nfd
        self.files = {}          # keeps references to normalized filenames
                                 # which have been used
        self.disambiguated = {}  # reserved disambiguated paths per directory
        self.metadata = metadata

    def getMetadata(self):
        """Returns a metadata database.

        This implementation returns an empty database
        which reads the metadata on demand.
        """
        if self.metadata is None:
            return metadata.Metadata()
        return self.metadata

    def disambiguate(self, dirpath, name):
        """Disambiguates a name in a directory.

        Adds a number to the file and leaves the case untouched.
        Returns *name* unchanged when no normalization is in effect.
        """
        if self.case_insensitive or self.enforce_nfd:
            if self.enforce_nfd:
                dirpath = self._toNFD(dirpath)
                name = self._toNFD(name)
            disambiguated = self.disambiguated.setdefault(dirpath, set())
            dot = name.rfind('.')
            if dot >= 0:
                suffix = name[dot:]
                name = name[:dot]
            else:
                suffix = ''
            n = name + suffix
            normalized = self.normalize(n)
            i = 1
            # Append '-1', '-2', ... before the suffix until the
            # normalized form is unique within the directory.
            while normalized in disambiguated:
                n = name + '-' + str(i) + suffix
                normalized = self.normalize(n)
                i += 1
            disambiguated.add(normalized)
            return n
        return name

    def dirname(self, path):
        """Returns the dirname."""
        return os.path.dirname(path)

    def join(self, path, *names):
        """Returns a joined path."""
        return os.path.join(path, *names)

    def _toNFD(self, name):
        """Helper to ensure NFD encoding.

        Linux and (most?) other Unix-like operating systems use the normalization
        form C (NFC) for UTF-8 encoding by default but do not enforce this.
        Darwin, the base of Macintosh OSX, enforces normalization form D (NFD),
        where a few characters are encoded in a different way.
        """
        if isinstance(name, unicode):
            name = unicodedata.normalize("NFD", name)
        elif sys.getfilesystemencoding() == 'utf-8':
            name = unicode(name, encoding='utf-8')
            name = unicodedata.normalize("NFD", name)
            name = name.encode('utf-8')
        return name

    def _toNFC(self, name):
        """Helper to ensure NFC encoding.

        Linux and (most?) other Unix-like operating systems use the normalization
        form C (NFC) for UTF-8 encoding by default but do not enforce this.
        Darwin, the base of Macintosh OSX, enforces normalization form D (NFD),
        where a few characters are encoded in a different way.
        """
        if isinstance(name, unicode):
            name = unicodedata.normalize("NFC", name)
        elif sys.getfilesystemencoding() == 'utf-8':
            name = unicode(name, encoding='utf-8')
            name = unicodedata.normalize("NFC", name)
            name = name.encode('utf-8')
        return name

    def normalize(self, name):
        """Normalize a filename.

        Uses lower case filenames if the repository is case insensitive.
        """
        if self.enforce_nfd:
            name = self._toNFD(name)
        if self.case_insensitive:
            name = name.lower()
        return name

    def encode(self, path, encoding=None):
        """Encodes a path in its normalized form.

        Uses the filesystem encoding as a default encoding. Assumes that
        the given path is also encoded in the filesystem encoding.
        """
        fsencoding = sys.getfilesystemencoding()
        if encoding is None:
            encoding = fsencoding
        if not isinstance(path, unicode):
            path = unicode(path, encoding=fsencoding)
        # Fixed: the original called the undefined global `normalize`
        # (NameError) and skipped normalization for byte-string paths.
        return self.normalize(path).encode(encoding)

    def writeable(self, path):
        """Must be overwritten to return a writable file-like object."""
        pass

    def readable(self, path):
        """Must be overwritten to return a readable file-like object."""
        pass

    def readFile(self, path):
        """Convenient method for reading a whole file."""
        fp = self.readable(path)
        try:
            data = fp.read()
            return data
        finally:
            fp.close()

    def compare(self, readable1, readable2):
        """Compare the contents of two readables chunk by chunk.

        Returns True iff both streams yield identical data.  Both
        streams are closed afterwards.  Fixed: the original compared
        only the first chunk (iterating over its characters), never
        verified that readable2 was exhausted, and closed readable1
        twice instead of closing readable2.
        """
        if readable1 is None:
            return False
        if readable2 is None:
            return False
        try:
            while True:
                chunk = readable1.read(self.chunk_size)
                # Read exactly as much from the second stream; at EOF
                # read one byte to detect trailing data in readable2.
                echo = readable2.read(len(chunk) or 1)
                if echo != chunk:
                    return False
                if not chunk:
                    return True
        finally:
            readable1.close()
            readable2.close()
+
+
class FileSystemRepository(Repository):
    """A filesystem repository that keeps track of already written files."""

    zope.interface.implements(interfaces.IFileSystemRepository)

    def exists(self, path):
        """Returns True iff the path exists in the filesystem."""
        return os.path.exists(path)

    def isdir(self, path):
        """Checks whether the path corresponds to a directory."""
        return os.path.isdir(path)

    def readable(self, path):
        """Returns a file like object that is open for read operations."""
        fp = self.files[path] = open(path, 'rb')
        return fp

    def writeable(self, path):
        """Returns a file like object that is open for write operations.

        Missing intermediate directories are created on demand.
        """
        dirname = self.dirname(path)
        self.ensuredir(dirname)
        fp = self.files[path] = open(path, 'wb')
        return fp

    def split(self, path):
        """Split a path, making sure that the tail returned is real.

        Fixed: the original definition was missing the ``self``
        parameter, so any call on an instance raised TypeError.
        """
        head, tail = os.path.split(path)
        if tail in unwanted:
            newpath = os.path.normpath(path)
            head, tail = os.path.split(newpath)
        if tail in unwanted:
            newpath = os.path.realpath(path)
            head, tail = os.path.split(newpath)
            if head == newpath or tail in unwanted:
                # Fixed: `Error` was undefined in this module (the code
                # was copied from fsutil) and the message used a comma
                # instead of %-interpolation.
                raise ValueError("path %r is the filesystem root" % path)
        if not head:
            head = os.curdir
        return head, tail

    def ensuredir(self, path):
        """Make sure that the given path is a directory, creating it if necessary.

        This may raise OSError if the creation operation fails.
        """
        if not os.path.isdir(path):
            os.makedirs(path)
+
+
class SnarfMetadata(metadata.Metadata):
    """A metadata implementation that reads the metadata from a SNARF archive."""

    def __init__(self, repository):
        # Pre-populate the cache by scanning the archive for
        # '@@Zope/Entries.xml' members.
        super(SnarfMetadata, self).__init__()
        self.repository = repository
        if not repository.stream:
            return
        for path in repository.iterPaths():
            if path.endswith(repository.join('@@Zope', 'Entries.xml')):
                # Build a DirectoryManager by hand (bypassing __init__,
                # which would try to read from the filesystem).
                dm = metadata.DirectoryManager.__new__(metadata.DirectoryManager)
                dm.zdir = repository.dirname(path)
                dm.efile = path
                text = repository.readFile(path)
                dm.entries = metadata.load_entries(text)
                # Keep a deep copy so later flushes can detect changes.
                dm.originals = copy.deepcopy(dm.entries)
                key = repository.dirname(dm.zdir)
                self.cache[key] = dm

    def getentry(self, file):
        """Return the metadata entry for a given file (or directory).

        Modifying the dict that is returned will cause the changes to
        the metadata to be written out when flush() is called. If
        there is no metadata entry for the file, return a new empty
        dict, modifications to which will also be flushed.
        """
        dir, base = self.repository.split(file)
        return self.getmanager(dir).getentry(base)

    def getmanager(self, dir):
        # Lazily create a manager for directories not seen during the
        # initial archive scan.
        if dir not in self.cache:
            self.cache[dir] = metadata.DirectoryManager(dir)
        return self.cache[dir]
+
+
class SnarfReadable(object):
    """Mimics read access to a serialized SNARF file.

    Presents the byte range [startpos, startpos + size) of the archive
    stream as if it were an independent file object.
    """

    def __init__(self, repository, path):
        self.stream = repository.stream
        self.startpos = repository.readpos[path]  # offset of member data
        self.size = repository.sizes[path]        # length of member data
        self.name = path

    def seek(self, offset, whence=0):
        """Seek within the member; offsets are relative to its start."""
        if whence == 0:
            self.stream.seek(self.startpos + offset)
            return offset
        elif whence == 1:
            return self.seek(self.tell() + offset)
        elif whence == 2:
            return self.seek(self.size + offset)

    def tell(self):
        """Return the current position relative to the member start."""
        return self.stream.tell() - self.startpos

    def readline(self):
        """Read one line from the underlying stream.

        Fixed: the original dropped the return value and always
        returned None.  NOTE(review): the line is not clipped at the
        member boundary and may include bytes of the following member.
        """
        return self.stream.readline()

    def read(self, bytes=None):
        """Read up to *bytes* bytes, clipped at the member boundary.

        None or a negative count reads the remainder of the member.
        Fixed: the original used the identity test ``bytes is -1``,
        which relies on CPython's small-int caching.
        """
        rest = self.size - self.tell()
        if bytes is None or bytes < 0 or bytes > rest:
            return self.stream.read(rest)
        return self.stream.read(bytes)

    def close(self):
        """No-op: the shared archive stream stays open."""
        pass
+
+
class SnarfWriteable(object):
    """Mimics write access to a SNARF archive.

    Data is appended to the shared archive stream; close() patches
    the placeholder length field written at *pos* with the number of
    bytes actually written.
    """

    def __init__(self, repository, path, pos):
        self.stream = repository.stream
        self.name = path
        self.pos = pos  # offset of the placeholder length field
        self.start = self.stream.tell()
        self.format = repository.len_format

    def write(self, data):
        """Append *data* to the archive and flush immediately."""
        self.stream.write(data)
        self.stream.flush()

    def close(self):
        """Patch the length field, then restore the stream position."""
        self.stream.flush()
        end = self.stream.tell()
        self.stream.seek(self.pos)
        self.stream.write(self.format % (end - self.start))
        self.stream.seek(end)
+
+
class SnarfRepository(FileSystemRepository):
    """A SNARF repository that stores a directory tree in a single archive."""

    zope.interface.implements(interfaces.IArchiveRepository)

    len_format = '%08d' # format of file length indicator

    def __init__(self, stream, case_insensitive=False, enforce_nfd=False):
        super(SnarfRepository, self).__init__(case_insensitive=case_insensitive,
                                              enforce_nfd=enforce_nfd)
        self.stream = stream
        self.readpos = {}  # path -> offset of the member's data in the stream
        self.sizes = {}    # path -> size of the member's data in bytes
        self.directories = set()  # directory names implied by stored paths

    def getMetadata(self):
        """Returns a special metadata database which reads directly
        from the SNARF archive."""
        return SnarfMetadata(self)

    def isdir(self, path):
        """Returns True iff the path matches a directory name.

        Since SNARF refers only implicitly to dirnames all filenames
        are scanned in the worst case.
        """
        if path in self.files or path in self.readpos:
            return False
        return path in self.directories

    def ensuredir(self, path):
        """Does nothing since a snarf treats directories implicitly."""
        pass

    def split(self, path):
        # Archive paths contain no symlinks or relative parts, so a
        # plain os.path.split suffices.
        return os.path.split(path)

    def writeable(self, path):
        """Returns a file like object that is open for write operations.

        Writes a dummy length indicator and a path. The returned
        SnarfWriteable overwrites the length indicator on close.
        """
        pos = self.stream.tell()
        dummy = self.len_format % 0
        self.stream.write("%s %s\n" % (dummy, path.encode('utf-8')))
        fp = self.files[path] = SnarfWriteable(self, path, pos)
        return fp

    def readable(self, path):
        # Position the reader at the start of the member's data.
        fp = self.files[path] = SnarfReadable(self, path)
        fp.seek(0)
        return fp

    def exists(self, path):
        # The archive is scanned lazily on first use.
        if not self.readpos:
            self._scan()
        return path in self.readpos or path in self.directories

    def iterPaths(self):
        # Iterates over all member paths stored in the archive.
        if not self.readpos:
            self._scan()
        return self.readpos.iterkeys()

    def readFile(self, path):
        # Reads one whole member directly from its recorded position.
        self.stream.seek(self.readpos[path])
        return self.stream.read(self.sizes[path])

    def _scan(self):
        """Scans the archive and reads all positions of files into a cache."""

        self.stream.seek(0)
        self.readpos = {}
        pos = 0
        while True:
            infoline = self.stream.readline()
            if not infoline:
                break
            if not infoline.endswith("\n"):
                raise IOError("incomplete info line %r" % infoline)
            offset = len(infoline)
            infoline = infoline[:-1]
            # Info line format: '<zero-padded size> <path>'
            sizestr, path = infoline.split(" ", 1)
            size = int(sizestr)
            self.sizes[path] = size
            pos = self.readpos[path] = pos + offset
            self.directories.add(self.dirname(path))
            # Skip over the member data to the next info line.
            pos += size
            self.stream.seek(pos)
Modified: zope.fssync/trunk/src/zope/fssync/snarf.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/snarf.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/snarf.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -60,7 +60,7 @@
if filter is None:
def filter(fspath):
return True
- names = fsutil.listdir(root)
+ names = os.listdir(root)
names.sort()
for name in names:
fspath = os.path.join(root, name)
Added: zope.fssync/trunk/src/zope/fssync/synchronizer.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/synchronizer.py (rev 0)
+++ zope.fssync/trunk/src/zope/fssync/synchronizer.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -0,0 +1,282 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Synchronizer implementations for filesystem synchronization.
+
+$Id: interfaces.py 73003 2007-03-06 10:34:19Z oestermeier $
+"""
+
+from zope import interface
+from zope import component
+from zope import annotation
+from zope import filerepresentation
+from zope import lifecycleevent
+
+from zope.dottedname.resolve import resolve
+from zope.filerepresentation.interfaces import IFileFactory
+from zope.filerepresentation.interfaces import IDirectoryFactory
+
+import metadata
+import interfaces
+
def dottedname(klass):
    """Return the dotted ``module.name`` path of *klass*."""
    return klass.__module__ + "." + klass.__name__
+
+
class MissingSynchronizer(Exception):
    """Raised when no synchronizer can be found for an object
    and the caller requested an error (see getSynchronizer)."""
    pass
+
+
class Extras(dict):
    """A serializable mapping of object attributes."""

    # A plain dict subclass; the interface declaration is what makes
    # it recognizable to the synchronization machinery.
    interface.implements(interfaces.ISynchronizableExtras)
+
+
class SynchronizableAnnotations(dict):
    """A serializable mapping of annotations."""

    interface.implements(interfaces.ISynchronizableAnnotations)
    component.adapts(annotation.interfaces.IAnnotations)

    def modify(self, target):
        """Transfers the namespaces to the target annotations.

        Returns a lifecycleevent.interfaces.ISequence modification
        descriptor or None if nothing changed.
        """
        # Fixed: the original called target.update(self) up front,
        # which made the per-key comparison below always see the new
        # value, so no modification was ever detected.
        modified = []
        for key, value in self.items():
            # Compare pickled representations of old and new values.
            old = interfaces.IPickler(target.get(key)).dumps()
            target[key] = value
            if old != interfaces.IPickler(value).dumps():
                modified.append(key)
        if modified:
            return lifecycleevent.Sequence(
                annotation.interfaces.IAnnotations,
                modified)
        return None
+
class Synchronizer(object):
    """A convenient base class for serializers."""

    interface.implements(interfaces.ISynchronizer)

    def __init__(self, context):
        self.context = context

    def getObject(self):
        """Return the synchronized object."""
        return self.context

    def metadata(self):
        """Returns a mapping for the metadata entries."""
        entries = {'factory': dottedname(self.context.__class__)}
        directly = interface.directlyProvidedBy(self.context)
        if directly:
            entries['provides'] = ' '.join(
                [dottedname(iface) for iface in directly])
        return entries

    def setmetadata(self, metadata):
        """Loads metadata from a dict.

        Specializations should return an IModificationDescription
        if a ModifiedEvent should be thrown.
        """
        provides = metadata.get('provides')
        if not provides:
            return None
        for name in provides.split():
            interface.alsoProvides(self.context, resolve(name))
        return None

    def extras(self):
        """No extra attributes by default."""
        return None

    def annotations(self):
        """Return the synchronizable annotations of the context, if any."""
        ann = annotation.interfaces.IAnnotations(self.context, None)
        if ann is None:
            return None
        return interfaces.ISynchronizableAnnotations(ann, None)

    def setannotations(self, annotations):
        """Consumes de-serialized annotations."""
        ann = annotation.interfaces.IAnnotations(self.context, None)
        if ann is None:
            return None
        sann = interfaces.ISynchronizableAnnotations(annotations, None)
        if sann is None:
            return None
        return sann.modify(ann)

    def setextras(self, extras):
        """Consumes de-serialized extra attributes.

        Returns an unspecific IModificationDescription.
        Application specific adapters may provide more informative
        descriptors.
        """
        changed = []
        for key, value in extras.iteritems():
            # Only attributes that already exist on the context are set.
            if not hasattr(self.context, key):
                continue
            if getattr(self.context, key) != value:
                changed.append(key)
            setattr(self.context, key, value)
        if changed:
            return lifecycleevent.Attributes(None, changed)
        return None
+
+
class FileSynchronizer(Synchronizer):
    """A convenient base class for file serializers."""

    interface.implements(interfaces.IFileSynchronizer)

    def dump(self, writeable):
        # Intentionally a no-op; subclasses write the serialized
        # representation of self.context to `writeable`.
        pass

    def load(self, readable):
        # Intentionally a no-op; subclasses read the serialized
        # representation from `readable` into self.context.
        pass
+
+
class DefaultSynchronizer(FileSynchronizer):
    """A synchronizer that stores an object as an xml pickle."""

    interface.implements(interfaces.IDefaultSynchronizer)

    def __init__(self, context):
        self.context = context

    def metadata(self):
        """Returns None.

        The absence of a factory entry tells the reader that the
        object must be reconstructed by unpickling."""
        return None

    def extras(self):
        """Returns None: a pickle is self contained."""
        return None

    def annotations(self):
        """Returns None.

        The annotations are already contained in the pickle.  That is
        only correct when annotations live in the object's attributes
        (such as IAttributeAnnotatable); otherwise override this
        method or use a different synchronizer class."""
        return None

    def dump(self, writeable):
        """Writes the context to *writeable* as an xml pickle."""
        pickler = interfaces.IPickler(self.context)
        pickler.dump(writeable)

    def load(self, readable):
        """Not supported; unpickling is handled elsewhere."""
        raise NotImplementedError
+
+
class DirectorySynchronizer(Synchronizer):
    """A serializer that stores objects as directory-like objects.
    """
    interface.implements(interfaces.IDirectorySynchronizer)

    def __getitem__(self, name):
        """Traverses the name in the given context.
        """
        return self.context[name]

    def iteritems(self):
        # NOTE(review): despite the name this returns context.items(),
        # which in Python 2 is a list, not an iterator -- confirm
        # whether callers rely on either form.
        return self.context.items()

    def update(self, items):
        """Updates the context."""
        self.context.update(items)

    def __setitem__(self, name, obj):
        """Sets the item."""
        self.context[name] = obj

    def __delitem__(self, name):
        """Deletes the item."""
        del self.context[name]
+
+
class FileGenerator(object):
    """Creates file-like objects from a serialized representation.

    Should be registered as the IFileGenerator utility
    and be used if no other class-based serializer can be found.
    """

    interface.implements(interfaces.IFileGenerator)

    def create(self, location, name, extension):
        """Create an empty file object via zope.filerepresentation.

        A named IFileFactory adapter for *extension* wins over the
        unnamed adapter; returns None when neither is registered.
        """
        factory = component.queryAdapter(location, IFileFactory, extension)
        if factory is None:
            factory = IFileFactory(location, None)
        if factory is None:
            return None
        return factory(name, None, '')

    def load(self, obj, readable):
        """Fill *obj* with the content of *readable*."""
        obj.data = readable.read()
+
class DirectoryGenerator(object):
    """Creates directory-like objects from a serialized representation.

    Should be registered as the IDirectoryGenerator utility
    and be used if no other class-based serializer can be found.
    """

    interface.implements(interfaces.IDirectoryGenerator)

    def create(self, location, name):
        """Create a directory-like object via zope.filerepresentation.

        The unnamed IDirectoryFactory adapter is queried twice for
        backward compatibility; returns None when none is registered.
        """
        factory = component.queryAdapter(location, IDirectoryFactory)
        if factory is None:
            factory = IDirectoryFactory(location, None)
        if factory is None:
            return None
        return factory(name)
+
+
def getSynchronizer(obj, raise_error=False):
    """Looks up a synchronizer for *obj*.

    A class-specific ISynchronizerFactory utility (registered under
    the dotted class name) takes precedence over the unnamed default
    utility.

    Sometimes no synchronizer might be defined or sometimes access
    to a synchronizer may be forbidden.  We return None in those
    cases.  Those cases may be unexpected and it may be a problem
    that the data are not completely serialized.  If raise_error is
    True we raise a MissingSynchronizer in those cases.
    """
    dn = dottedname(obj.__class__)
    factory = component.queryUtility(interfaces.ISynchronizerFactory, name=dn)
    if factory is None:
        factory = component.queryUtility(interfaces.ISynchronizerFactory)
    if factory is None:
        if raise_error:
            # Fixed: the original raised the undefined name
            # 'MissingSerializer' (NameError) instead of the
            # MissingSynchronizer exception defined in this module.
            raise MissingSynchronizer(dn)
        return None
    return factory(obj)
+
Added: zope.fssync/trunk/src/zope/fssync/task.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/task.py (rev 0)
+++ zope.fssync/trunk/src/zope/fssync/task.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -0,0 +1,698 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Synchronize content objects with a repository.
+
+$Id: syncer.py 73003 2007-03-06 10:34:19Z oestermeier $
+"""
+import os
+
+import zope.interface
+import zope.component
+import zope.traversing.api
+import zope.event
+import zope.lifecycleevent
+import zope.dottedname.resolve
+
+import metadata
+import interfaces
+import synchronizer
+import repository
+import fsutil
+import pickle
+
+from synchronizer import dottedname
+
class SynchronizationError(Exception):
    """Raised for synchronization problems that an update cannot repair."""
    pass
+
@zope.component.adapter(zope.interface.Interface)
@zope.interface.implementer(interfaces.IEntryId)
def EntryId(obj):
    """Adapter factory returning the UTF-8 encoded traversal path as entry id.

    Returns None when the object has no path in content space.
    """
    try:
        path = zope.traversing.api.getPath(obj)
        return path.encode('utf-8')
    except (TypeError, KeyError, AttributeError):
        # this case can be triggered for persistent objects that don't
        # have a name in the content space (annotations, extras)
        return None
+
+
class ObjectSynchronized(object):
    """A default modification description for synchronized objects.

    Instances are passed to ObjectModifiedEvent to describe that an
    object's state was replaced during synchronization.
    """

    zope.interface.implements(interfaces.IObjectSynchronized)
+
class SyncTask(object):
    """Convenient base class for synchronization tasks.

    Holds the synchronizer lookup callable, the target repository,
    and an optional context object used as a reference point for
    pickling/unpickling.
    """

    def __init__(self, getSynchronizer,
                 repository,
                 context=None):
        # getSynchronizer(obj, raise_error=...) -> synchronizer or None
        self.getSynchronizer = getSynchronizer
        self.repository = repository
        self.context = context
+
class Checkout(SyncTask):
    """Checkout of a content space into a repository."""

    zope.interface.implements(interfaces.ICheckout)

    def perform(self, ob, name, location=''):
        """Check an object out.

        ob -- The object to be checked out

        name -- The name of the object

        location -- The directory or path where the object will go
        """
        root = dict()
        self.context = root[name] = ob
        self.dump(synchronizer.DirectorySynchronizer(root), location)

    def serializableItems(self, items, dirpath):
        """Returns items which have synchronizer.

        Returns a tuple of disambiguated name, original key, and synchronizer.
        """
        result = []
        repository = self.repository
        if items is not None:
            for key, value in items:
                # Renamed from ``synchronizer`` to avoid shadowing the
                # module import used by perform().
                serializer = self.getSynchronizer(value, raise_error=False)
                if serializer is not None:
                    name = repository.disambiguate(dirpath, key)
                    result.append((name, key, serializer))
        return sorted(result)

    def dump(self, synchronizer, path):
        """Dump a synchronizer's data at *path*, recursing into directories.

        Raises ``SynchronizationError`` if the synchronizer is neither a
        directory nor a file synchronizer.
        """
        if synchronizer is None:
            return
        if interfaces.IDirectorySynchronizer.providedBy(synchronizer):
            items = self.serializableItems(synchronizer.iteritems(), path)
            self.dumpSpecials(path, items)
            for name, key, s in items:  # recurse down the tree
                self.dump(s, self.repository.join(path, name))
        elif interfaces.IFileSynchronizer.providedBy(synchronizer):
            fp = self.repository.writeable(path)
            synchronizer.dump(fp)
            fp.close()
        else:
            raise SynchronizationError("invalid synchronizer")

    def dumpMetadata(self, epath, entries):
        """Serialize *entries* as XML and write them to *epath*."""
        xml = metadata.dump_entries(entries)
        fp = self.repository.writeable(epath)
        fp.write(xml)
        fp.close()

    def dumpSpecials(self, path, items):
        """Write the @@Zope administrative data for *items*:
        Entries.xml plus per-item Annotations and Extra trees.
        """
        entries = {}
        repository = self.repository
        zdir = repository.join(path, '@@Zope')
        epath = repository.join(zdir, 'Entries.xml')

        for name, key, s in items:
            obj = s.getObject()
            entry = dict(type=typeIdentifier(obj))
            # Renamed from ``metadata`` to avoid shadowing the module
            # import used by dumpMetadata().
            mdata = s.metadata()
            if mdata:
                for k, v in mdata.items():
                    if v:
                        entry[k] = v
            objid = getEntryId(obj)
            if objid:
                entry['id'] = str(objid)
            if key != name:
                # The repository name was disambiguated; remember the
                # original key and its type so it can be reconstructed.
                entry['key'] = key
                entry['keytype'] = dottedname(key.__class__)
            entries[name] = entry

        if path:
            self.dumpMetadata(epath, entries)

        adir = repository.join(zdir, 'Annotations')
        for name, key, s in items:
            dir = repository.join(adir, name)
            annotations = s.annotations()
            if annotations:
                serializer = self.getSynchronizer(annotations)
                if serializer is not None:
                    self.dump(serializer, dir)

        edir = repository.join(zdir, 'Extra')
        for name, key, s in items:
            dir = repository.join(edir, name)
            extras = s.extras()
            if extras:
                serializer = self.getSynchronizer(extras)
                if serializer is not None:
                    self.dump(serializer, dir)

        # NOTE(review): for the root (empty path) the Entries.xml is
        # written after the children rather than before — presumably so
        # the target tree exists first; confirm before changing.
        if not path:
            self.dumpMetadata(epath, entries)
+
+
class Commit(SyncTask):
    """Commit changes from a repository to the object database.

    The repository's originals must be consistent with the object
    database; this should be checked beforehand by a `Check` instance
    with the same arguments.
    """

    zope.interface.implements(interfaces.ICommit)

    # NOTE(review): 'debug' is never consulted in this module.
    debug = False

    def __init__(self, getSynchronizer, repository):
        super(Commit, self).__init__(getSynchronizer, repository)
        # Metadata database describing the repository's entries.
        self.metadata = repository.getMetadata()

    def perform(self, container, name, fspath):
        """Entry point: synchronize *name* in *container* from *fspath*."""
        self.synchronize(container, name, fspath)

    def synchronize(self, container, name, fspath):
        """Synchronize an object or object tree from a repository.

        ``SynchronizationError`` is raised for errors that can't be
        corrected by a update operation, including invalid object
        names.
        """
        self.context = container
        # Modification descriptions collected for one ObjectModifiedEvent.
        modifications = []
        if invalidName(name):
            raise SynchronizationError("invalid separator in name %r" % name)

        if not name:
            # An empty name denotes the directory itself.
            self.synchDirectory(container, fspath)
        else:
            # NOTE(review): this binding is unused; the name is rebound
            # below before its first use.
            synchronizer = self.getSynchronizer(container)
            key = originalKey(fspath, name, self.metadata)
            try:
                traverseKey(container, key)
            except:
                # NOTE(review): bare except treats any lookup failure as
                # "object not present" — consider narrowing.
                self.synchNew(container, key, fspath)
            else:
                modified = self.synchOld(container, key, fspath)
                if modified:
                    modifications.append(modified)
            # Now update extra and annotations
            try:
                obj = traverseKey(container, key)
            except:
                pass
            else:
                metadata = self.metadata.getentry(fspath)
                synchronizer = self.getSynchronizer(obj)
                modified = synchronizer.setmetadata(metadata)
                if modified:
                    modifications.append(modified)

                extrapath = fsutil.getextra(fspath)
                if self.repository.exists(extrapath):
                    extras = synchronizer.extras()
                    extras = self.synchSpecials(extrapath, extras)
                    modified = synchronizer.setextras(extras)
                    if modified:
                        modifications.append(modified)

                annpath = fsutil.getannotations(fspath)
                if self.repository.exists(annpath):
                    annotations = synchronizer.annotations()
                    annotations = self.synchSpecials(annpath, annotations)
                    modified = synchronizer.setannotations(annotations)
                    if modified:
                        modifications.append(modified)

                if modifications:
                    # Fire a single event carrying all modification
                    # descriptions gathered above.
                    zope.event.notify(zope.lifecycleevent.ObjectModifiedEvent(obj, *modifications))

    def synchSpecials(self, fspath, specials):
        """Synchronize an extra or annotation mapping.

        Returns the (possibly replaced) *specials* mapping.
        """
        # NOTE(review): this local is unused; methods below use
        # self.repository directly.
        repository = self.repository
        md = self.metadata.getmanager(fspath)
        entries = md.entries
        synchronizer = self.getSynchronizer(specials)
        if interfaces.IDirectorySynchronizer.providedBy(synchronizer):
            # Directory-like specials: recurse entry by entry.
            for name, entry in entries.items():
                path = self.repository.join(fspath, name)
                self.synchronize(specials, name, path)
        else:
            if interfaces.IDefaultSynchronizer.providedBy(synchronizer):
                # Default synchronizer: the whole mapping is unpickled,
                # replacing the passed-in object.
                fp = self.repository.readable(fspath)
                unpickler = interfaces.IUnpickler(self.context)
                specials = unpickler.load(fp)
                fp.close()
            elif interfaces.IFileSynchronizer.providedBy(synchronizer):
                # File synchronizer: load in place.
                fp = self.repository.readable(fspath)
                synchronizer.load(fp)
                fp.close()

        return specials

    def synchDirectory(self, container, fspath):
        """Helper to synchronize a directory."""
        adapter = self.getSynchronizer(container)
        # nameset: union of object-tree keys and metadata names,
        # mapping each name to its repository path.
        nameset = {}
        if interfaces.IDirectorySynchronizer.providedBy(adapter):
            for key, obj in adapter.iteritems():
                nameset[key] = self.repository.join(fspath, key)
        else:
            # Annotations, Extra
            for key in container:
                nameset[key] = self.repository.join(fspath, key)
        for name in self.metadata.getnames(fspath):
            nameset[name] = self.repository.join(fspath, name)

        # Sort the list of keys for repeatability
        names_paths = nameset.items()
        names_paths.sort()
        subdirs = []
        # Do the non-directories first.
        # This ensures that the objects are created before dealing
        # with Annotations/Extra for those objects.
        for name, path in names_paths:
            if self.repository.isdir(path):
                subdirs.append((name, path))
            else:
                self.synchronize(container, name, path)
        # Now do the directories
        for name, path in subdirs:
            self.synchronize(container, name, path)

    def synchNew(self, container, name, fspath):
        """Helper to synchronize a new object."""
        entry = self.metadata.getentry(fspath)
        if entry:
            obj = self.createObject(container, name, entry, fspath)
            synchronizer = self.getSynchronizer(obj)
            if interfaces.IDirectorySynchronizer.providedBy(synchronizer):
                # New directory: recurse into its contents.
                self.synchDirectory(obj, fspath)

    def synchOld(self, container, name, fspath):
        """Helper to synchronize an existing object.

        Returns a modification description when the object changed,
        otherwise None.
        """
        modification = None
        entry = self.metadata.getentry(fspath)
        if entry.get("flag") == "removed":
            self.deleteItem(container, name)
            return
        if not entry:
            # This object was not included on the filesystem; skip it
            return
        # NOTE(review): this local is unused.
        cont = self.getSynchronizer(container)
        key = originalKey(fspath, name, self.metadata)
        obj = traverseKey(container, key)
        synchronizer = self.getSynchronizer(obj)
        if interfaces.IDirectorySynchronizer.providedBy(synchronizer):
            self.synchDirectory(obj, fspath)
        else:
            type = entry.get("type")
            if type and typeIdentifier(obj) != type:
                # Type changed on the filesystem: replace outright.
                self.createObject(container, key, entry, fspath, replace=True)
            else:
                original_fn = fsutil.getoriginal(fspath)
                if self.repository.exists(original_fn):
                    original = self.repository.readable(original_fn)
                    replica = self.repository.readable(fspath)
                    # "new" means the working copy differs from the original.
                    new = not self.repository.compare(original, replica)
                else:
                    # value appears to exist in the object tree, but
                    # may have been created as a side effect of an
                    # addition in the parent; this can easily happen
                    # in the extra or annotation data for an object
                    # copied from another using "zsync copy" (for
                    # example)
                    new = True
                if new:
                    if not entry.get("factory"):
                        # If there's no factory, we can't call load
                        self.createObject(container, key, entry, fspath, True)
                        obj = traverseKey(container, key)
                        modification = ObjectSynchronized()
                    else:
                        fp = self.repository.readable(fspath)
                        # Only reload if the dumped form actually differs.
                        modified = not compare(fp, synchronizer)
                        if modified:
                            fp.seek(0)
                            synchronizer.load(fp)
                            modification = ObjectSynchronized()
                        fp.close()
        return modification

    def createObject(self, container, name, entry, fspath, replace=False):
        """Helper to create a deserialized object.

        The creation strategy is chosen from the metadata entry:
        a named factory, a pickled 'type', or a generator utility.
        """
        factory_name = entry.get("factory")
        type = entry.get("type")
        isdir = self.repository.isdir(fspath)
        # 'added' is set when a generator already placed the object in
        # the container, so setItem must not run again below.
        added = False
        if factory_name:
            generator = zope.component.queryUtility(interfaces.IObjectGenerator,
                                                    name=factory_name)
            if generator is not None:
                obj = generator.create(container, name)
                added = True
            else:
                try:
                    obj = resolveDottedname(factory_name)()
                except TypeError:
                    raise fsutil.Error("Don't know how to create %s" % factory_name)
            synchronizer = self.getSynchronizer(obj)
            if interfaces.IDefaultSynchronizer.providedBy(synchronizer):
                # Default synchronizer: replace the object with the unpickled state.
                fp = self.repository.readable(fspath)
                unpickler = interfaces.IUnpickler(self.context)
                obj = unpickler.load(fp)
                fp.close()
            elif interfaces.IFileSynchronizer.providedBy(synchronizer):
                fp = self.repository.readable(fspath)
                synchronizer.load(fp)
                fp.close()
        elif type:
            # A type but no factory: the repository file holds a pickle.
            fp = self.repository.readable(fspath)
            unpickler = interfaces.IUnpickler(self.context)
            obj = unpickler.load(fp)
        else:
            # No factory, no type: fall back to the generator utilities.
            if isdir:
                generator = zope.component.queryUtility(interfaces.IDirectoryGenerator)
            else:
                generator = zope.component.queryUtility(interfaces.IFileGenerator)
                # Use the filename extension as a hint for the generator.
                isuffix = name.rfind(".")
                if isuffix >= 0:
                    suffix = name[isuffix:]
                else:
                    suffix = "."

            if generator is None:
                raise fsutil.Error("Don't know how to create object for %s" % fspath)

            if isdir:
                obj = generator.create(container, name)
            else:
                obj = generator.create(container, name, suffix)
                fp = self.repository.readable(fspath)
                if obj is None:
                    # The generator could not build a file object;
                    # fall back to unpickling the repository data.
                    pickler = interfaces.IUnpickler(self.context)
                    obj = pickler.load(fp)
                else:
                    generator.load(obj, fp)
                fp.close()

        if not added:
            self.setItem(container, name, obj, replace)
        return obj

    def setItem(self, container, key, obj, replace=False):
        """Helper to set an item in a container.

        Uses the synchronizer for the container if a synchronizer is available.
        """
        dir = self.getSynchronizer(container)
        if interfaces.IDirectorySynchronizer.providedBy(dir):
            if not replace:
                # Only newly created objects get an ObjectCreatedEvent.
                zope.event.notify(zope.lifecycleevent.ObjectCreatedEvent(obj))
            if replace:
                del dir[key]
            dir[key] = obj
        else:
            # Plain mapping (annotations, extras): assign directly.
            container[key] = obj

    def deleteItem(self, container, key):
        """Helper to delete an item from a container.

        Uses the synchronizer if possible.
        """
        dir = self.getSynchronizer(container)
        if interfaces.IDirectorySynchronizer.providedBy(dir):
            del dir[key]
        else:
            del container[key]
+
+
class Checkin(Commit):
    """A Commit that only adds a brand new object tree."""

    zope.interface.implements(interfaces.ICheckin)

    def perform(self, container, name, fspath):
        """Checkin a new object tree.

        Raises a ``SynchronizationError`` if the name already exists
        in the object database.
        """
        self.context = container  # use container as context of reference
        self.metadata.added()
        try:
            traverseKey(container, name)
        except:
            # NOTE(review): bare except — any lookup failure is taken to
            # mean "name is free"; consider narrowing.
            self.synchronize(container, name, fspath)
        else:
            raise SynchronizationError("object already exists %r" % name)
+
+
class Check(SyncTask):
    """Check that a repository is consistent with the object database.
    """

    zope.interface.implements(interfaces.ICheck)

    def __init__(self, getSynchronizer, repository,
                 raise_on_conflicts=False):
        super(Check, self).__init__(getSynchronizer, repository)
        self.metadata = repository.getMetadata()
        # Filesystem paths with detected conflicts, in discovery order.
        self.conflicts = []
        self.raise_on_conflicts = raise_on_conflicts

    def errors(self):
        """Return a list of errors (conflicts).

        The return value is a list of filesystem pathnames for which
        a conflict exists. A conflict usually refers to a file that
        was modified on the filesystem while the corresponding object
        was also modified in the database. Other forms of conflicts
        are possible, e.g. a file added while an object was added in
        the corresponding place, or inconsistent labeling of the
        filesystem objects (e.g. an existing file marked as removed,
        or a non-existing file marked as added).
        """
        return self.conflicts

    def conflict(self, fspath):
        """Helper to report a conflict.

        Conflicts can be retrieved by calling `errors()`.
        """
        if self.raise_on_conflicts:
            raise SynchronizationError(fspath)
        if fspath not in self.conflicts:
            self.conflicts.append(fspath)

    def check(self, container, name, fspath):
        """Compare an object or object tree from the filesystem.

        If the originals on the filesystem are not up to date, errors
        are reported by calling `conflict()`.

        Invalid object names are reported by raising
        ``SynchronizationError``.
        """
        self.context = container
        if invalidName(name):
            raise SynchronizationError("invalid separator in name %r" % name)

        if not name:
            # An empty name denotes the directory itself.
            self.checkDirectory(container, fspath)
        else:
            synchronizer = self.getSynchronizer(container)
            key = originalKey(fspath, name, self.metadata)
            try:
                traverseKey(container, key)
            except:
                # NOTE(review): bare except treats any lookup failure as
                # "object not present" — consider narrowing.
                self.checkNew(fspath)
            else:
                self.checkOld(container, key, fspath)

            # Now check extra and annotations
            try:
                obj = traverseKey(container, key)
            except:
                pass
            else:
                adapter = self.getSynchronizer(obj)
                extras = adapter.extras()
                extrapath = fsutil.getextra(fspath)
                if extras and self.repository.exists(extrapath):
                    self.checkSpecials(extras, extrapath)

                annotations = adapter.annotations()
                annpath = fsutil.getannotations(fspath)
                if annotations and self.repository.exists(annpath):
                    self.checkSpecials(annotations, annpath)

    def checkSpecials(self, container, fspath):
        """Helper to check an extra/annotation mapping.

        Like checkDirectory but iterates the mapping directly instead
        of going through a directory synchronizer.
        """
        nameset = {}
        for key in container:
            nameset[key] = 1
        for name in self.metadata.getnames(fspath):
            nameset[name] = 1
        # Sort the list of keys for repeatability
        names = nameset.keys()
        names.sort()
        for name in names:
            self.check(container, name, self.repository.join(fspath, name))

    def checkDirectory(self, container, fspath):
        """Helper to check a directory."""
        adapter = self.getSynchronizer(container)
        # Union of the object-tree keys and the metadata names.
        nameset = {}
        if interfaces.IDirectorySynchronizer.providedBy(adapter):
            for key, obj in adapter.iteritems():
                nameset[key] = 1
        else:
            for key in container:
                nameset[key] = 1
        for name in self.metadata.getnames(fspath):
            nameset[name] = 1
        # Sort the list of keys for repeatability
        names = nameset.keys()
        names.sort()
        for name in names:
            self.check(container, name, self.repository.join(fspath, name))

    def checkNew(self, fspath):
        """Helper to check a new object."""
        entry = self.metadata.getentry(fspath)
        if entry:
            if entry.get("flag") != "added":
                # Present in metadata without the 'added' flag although
                # the object does not exist in the database.
                self.conflict(fspath)
            else:
                if not self.repository.exists(fspath):
                    # Marked as added but the file is missing.
                    self.conflict(fspath)
            if self.repository.isdir(fspath):
                # Recursively check registered contents
                for name in self.metadata.getnames(fspath):
                    self.checkNew(self.repository.join(fspath, name))

    def checkOld(self, container, name, fspath):
        """Helper to check an existing object."""
        entry = self.metadata.getentry(fspath)
        if not entry:
            # Object exists in the database but is unknown to the metadata.
            self.conflict(fspath)
        if "conflict" in entry:
            self.conflict(fspath)
        flag = entry.get("flag")
        if flag == "removed":
            if self.repository.exists(fspath):
                # Marked removed but the file still exists.
                self.conflict(fspath)
        else:
            if not self.repository.exists(fspath):
                # Not marked removed but the file is gone.
                self.conflict(fspath)

        synchronizer = self.getSynchronizer(container)
        key = originalKey(fspath, name, self.metadata)
        obj = traverseKey(container, key)
        adapter = self.getSynchronizer(obj)
        if interfaces.IDirectorySynchronizer.providedBy(adapter):
            if flag != "removed" or self.repository.exists(fspath):
                self.checkDirectory(obj, fspath)
        else:
            if flag == "added":
                # Marked added although the object already exists.
                self.conflict(fspath)
            oldfspath = fsutil.getoriginal(fspath)
            # Compare against the original copy when available,
            # otherwise against the working file itself.
            if self.repository.exists(oldfspath):
                cmppath = oldfspath
            else:
                cmppath = fspath

            fp = self.repository.readable(cmppath)
            if not compare(fp, adapter):
                self.conflict(fspath)
            fp.close()
+
+
def getEntryId(obj):
    """Shortcut for adapter lookup.

    Returns None when no IEntryId adapter is registered for *obj*.
    """
    return zope.component.queryAdapter(obj, interfaces.IEntryId)
+
def invalidName(name):
    """Return True if *name* may not be used as an object name.

    Rejected: the special names '.' and '..', and any name containing a
    path separator (os.sep, os.altsep when defined, or a literal '/').
    """
    if name in (".", ".."):
        return True
    separators = [os.sep, "/"]
    if os.altsep:
        separators.append(os.altsep)
    return any(sep in name for sep in separators)
+
def traverseKey(container, key):
    """Look *key* up in *container*; propagates the container's lookup error."""
    return container[key]
+
def typeIdentifier(obj):
    """Return the dotted class name used as the entry 'type' in metadata."""
    return synchronizer.dottedname(obj.__class__)
+
def originalKey(fspath, name, metadata):
    """Reconstructs the original key from the metadata database.

    The metadata entry may record a 'key' and a 'keytype' when the
    repository name had to be disambiguated; the key is coerced back
    to that type. Falls back to the filesystem *name* unchanged.
    """
    entry = metadata.getentry(fspath)
    keytype = entry.get('keytype')
    key = entry.get('key', name)
    if keytype:
        keytype = resolveDottedname(keytype)
        if keytype == key.__class__:
            return key
        if keytype == unicode:
            # NOTE(review): coerces *name* rather than the stored key —
            # confirm this is intended when 'key' and 'name' differ.
            return unicode(name, encoding='utf-8')
        return keytype(name)
    return name
+
def resolveDottedname(dottedname):
    """Resolve a dotted name to an object, refusing ``eval``.

    The eval check keeps metadata-supplied factory names from naming an
    arbitrary-code factory.
    """
    factory = zope.dottedname.resolve.resolve(dottedname)
    if factory == eval:
        raise TypeError('invalid factory type')
    return factory
+
+
def compare(readable, dumper):
    """Help function for the comparison of a readable and a synchronizer.

    The dumper is handed a write-only sink that checks every written
    chunk against the corresponding bytes of *readable*; the first
    mismatch aborts the dump. Returns True only when the dump matched
    and consumed the readable completely.
    """
    class _Mismatch(Exception):
        """Raised when the dumped data diverges from the readable."""

    class _CheckingSink(object):
        def write(self, data):
            expected = readable.read(len(data))
            if expected != data:
                raise _Mismatch

    try:
        dumper.dump(_CheckingSink())
    except _Mismatch:
        return False
    # The dump matched so far; equal only if nothing is left unread.
    return readable.read() == ''
+
+
class ComparePickles(object):
    """Adapt a (context, pickler) pair to the dumper interface of compare()."""

    def __init__(self, context, pickler):
        # The pickler serializes; the context is what gets serialized.
        self.pickler = pickler
        self.context = context

    def dump(self, writeable):
        """Serialize the wrapped context onto *writeable* via the pickler."""
        self.pickler.dump(self.context, writeable)
Modified: zope.fssync/trunk/src/zope/fssync/tests/mockmetadata.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/tests/mockmetadata.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/tests/mockmetadata.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -42,6 +42,9 @@
def flush(self):
    # No-op: the mock keeps its data in memory, nothing to write out.
    pass
def added(self):
    # No-op stand-in for the metadata interface's added() hook,
    # which Checkin.perform() invokes on the real metadata database.
    pass
+
# These only exist for the test framework
def makekey(self, path):
Deleted: zope.fssync/trunk/src/zope/fssync/tests/test_command.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/tests/test_command.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/tests/test_command.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -1,102 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests for yet another command-line handler.
-
-$Id$
-"""
-
-import sys
-import unittest
-
-from cStringIO import StringIO
-
-from zope.fssync import command
-
-
-class CommandTests(unittest.TestCase):
-
- def setUp(self):
- self.called = False
- self.old_stdout = sys.stdout
- self.old_stderr = sys.stderr
- self.new_stdout = StringIO()
- self.new_stderr = StringIO()
- sys.stdout = self.new_stdout
- sys.stderr = self.new_stderr
- self.cmd = command.Command("testcmd", "%(program)s msg")
-
- def tearDown(self):
- sys.stdout = self.old_stdout
- sys.stderr = self.old_stderr
-
- def test_no_command(self):
- self.assertRaises(command.Usage, self.cmd.realize, [])
-
- def test_unknown_command(self):
- self.assertRaises(command.Usage, self.cmd.realize, ["throb"])
-
- def test_global_help_short(self):
- self.assertRaises(SystemExit, self.cmd.realize, ["-h"])
- self.assert_(self.new_stdout.getvalue())
-
- def test_global_help_long(self):
- self.assertRaises(SystemExit, self.cmd.realize, ["--help"])
- self.assert_(self.new_stdout.getvalue())
-
- def test_calling_command(self):
- self.cmd.addCommand("throb", self.mycmd)
- self.cmd.realize(["throb"])
- self.cmd.run()
- self.assertEqual(self.opts, [])
- self.assertEqual(self.args, [])
-
- def mycmd(self, opts, args):
- """dummy help text"""
- self.called = True
- self.opts = opts
- self.args = args
-
- def test_calling_command_via_alias(self):
- self.cmd.addCommand("throb", self.mycmd, "x:y", "prev next",
- aliases="chunk thunk")
- self.cmd.realize(["thunk", "-yx", "42", "--", "-more", "args"])
- self.cmd.run()
- self.assertEqual(self.opts, [("-y", ""), ("-x", "42")])
- self.assertEqual(self.args, ["-more", "args"])
-
- def test_calling_command_with_args(self):
- self.cmd.addCommand("throb", self.mycmd, "x:", "spew")
- self.cmd.realize(["throb", "-x", "42", "--spew", "more", "args"])
- self.cmd.run()
- self.assertEqual(self.opts, [("-x", "42"), ("--spew", "")])
- self.assertEqual(self.args, ["more", "args"])
-
- def test_local_help_short(self):
- self.cmd.addCommand("throb", self.mycmd)
- self.assertRaises(SystemExit, self.cmd.realize, ["throb", "-h"])
- self.assert_(self.new_stdout.getvalue())
- self.assert_(not self.called)
-
- def test_local_help_long(self):
- self.cmd.addCommand("throb", self.mycmd)
- self.assertRaises(SystemExit, self.cmd.realize, ["throb", "--help"])
- self.assert_(self.new_stdout.getvalue())
- self.assert_(not self.called)
-
-
-def test_suite():
- return unittest.makeSuite(CommandTests)
-
-if __name__ == "__main__":
- unittest.main(defaultTest="test_suite")
Added: zope.fssync/trunk/src/zope/fssync/tests/test_docs.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/tests/test_docs.py (rev 0)
+++ zope.fssync/trunk/src/zope/fssync/tests/test_docs.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -0,0 +1,95 @@
+##############################################################################
+#
+# Copyright (c) 2004 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Tests for the fssync package's documentation files
+
+$Id: test_docs.py 70826 2006-10-20 03:41:16Z baijum $
+"""
+import sys
+import unittest
+import zope
+
+from zope import interface
+from zope.testing import doctest
+from zope.testing import doctestunit
+from zope.testing import module
+
+
+from zope.traversing.interfaces import IContainmentRoot
+from zope.location.tests import TLocation
+
+from zope.fssync import pickle
+
def setUp(test):
    # Register the 'zope.fssync.doctest' pseudo-module for the doctests.
    module.setUp(test, 'zope.fssync.doctest')
+
def tearDown(test):
    # Unregister the pseudo-module installed by setUp().
    module.tearDown(test, 'zope.fssync.doctest')
+
+
class PersistentLoaderTestCase(unittest.TestCase):
    """Tests for the path-based persistent-reference loaders in pickle.py."""

    def setUp(self):
        # Build a small containment tree:
        #   root (containment root) -> o1, o2; o1 -> o3; o1.foo aliases o2.
        root = TLocation()
        interface.directlyProvides(root, IContainmentRoot)
        o1 = TLocation(); o1.__parent__ = root; o1.__name__ = 'o1'
        o2 = TLocation(); o2.__parent__ = root; o2.__name__ = 'o2'
        o3 = TLocation(); o3.__parent__ = o1; o3.__name__ = 'o3'
        root.o1 = o1
        root.o2 = o2
        o1.foo = o2
        o1.o3 = o3
        self.root = root
        self.o1 = o1
        self.o2 = o2

    def testPathPersistentLoader(self):
        # Loading by absolute path must resolve to the identical objects.
        pickler = pickle.StandardUnpickler(self.o1)
        loader = pickle.PathPersistentLoader(pickler)
        self.assert_(loader.load('/') is self.root)
        self.assert_(loader.load('/o2') is self.o2)

#    def testParentPersistentLoader(self):
#        loader = pickle.ParentPersistentLoader(self.o1, self.o1)
#        self.assert_(loader.load(fspickle.PARENT_MARKER) is self.o1)
#        self.assert_(loader.load('/') is self.root)
#        self.assert_(loader.load('/o2') is self.o2)
+
+
def test_suite():
    """Assemble the package test suite: unit tests, module doctests,
    and the README/caseinsensitivity doctest files.
    """
    globs = {'zope': zope,
             'pprint': doctestunit.pprint}

    # Combine doctest option flags with '|' (they are bit flags).
    flags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS

    # The original bound an empty TestSuite() here that was immediately
    # overwritten by makeSuite(); the dead assignment has been removed.
    suite = unittest.makeSuite(PersistentLoaderTestCase)
    suite.addTest(doctest.DocTestSuite('zope.fssync.pickle'))

    suite.addTest(doctest.DocFileSuite('../README.txt',
                                       globs=globs,
                                       setUp=setUp, tearDown=tearDown,
                                       optionflags=flags))

    suite.addTest(doctest.DocFileSuite('../caseinsensitivity.txt',
                                       globs=globs,
                                       setUp=setUp, tearDown=tearDown,
                                       optionflags=flags))

    return suite
+
if __name__ == '__main__':
    # Bug fix: unittest.main() has no 'default' argument — the keyword
    # is 'defaultTest' (passing 'default' raises a TypeError).
    unittest.main(defaultTest='test_suite')
Deleted: zope.fssync/trunk/src/zope/fssync/tests/test_network.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/tests/test_network.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/tests/test_network.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -1,195 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests for the Network class.
-
-$Id$
-"""
-
-import os
-import select
-import socket
-import unittest
-import threading
-
-from StringIO import StringIO
-
-from os.path import join
-
-from zope.fssync.fssync import Network, Error
-from zope.fssync.tests.tempfiles import TempFiles
-
-sample_rooturl = "http://user:passwd@host:8080/path"
-
-HOST = "127.0.0.1" # localhost
-PORT = 60841 # random number
-RESPONSE = """HTTP/1.0 404 Not found\r
-Content-type: text/plain\r
-Content-length: 0\r
-\r
-"""
-
-class DummyServer(threading.Thread):
-
- """A server that can handle one HTTP request (returning a 404 error)."""
-
- def __init__(self, ready):
- self.ready = ready # Event signaling we're listening
- self.stopping = False
- threading.Thread.__init__(self)
-
- def run(self):
- svr = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- svr.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- svr.bind((HOST, PORT))
- svr.listen(1)
- self.ready.set()
- conn = None
- sent_response = False
- while not self.stopping:
- if conn is None:
- r = [svr]
- else:
- r = [conn]
- r, w, x = select.select(r, [], [], 0.01)
- if not r:
- continue
- s = r[0]
- if s is svr:
- conn, addr = svr.accept()
- ##print "connect from", `addr`
- else:
- if s is not conn:
- raise AssertionError("s is not conn")
- data = conn.recv(1000)
- ##print "received", `data`
- if not data:
- break
- if not sent_response:
- conn.send(RESPONSE)
- conn.close()
- conn = None
- sent_response = True
- if conn is not None:
- conn.close()
- svr.close()
- ##print "stopped"
-
- def stop(self):
- ##print "stopping"
- self.stopping = True
-
-class TestNetwork(TempFiles):
-
- def setUp(self):
- TempFiles.setUp(self)
- self.network = Network()
-
- def test_initial_state(self):
- self.assertEqual(self.network.rooturl, None)
- self.assertEqual(self.network.roottype, None)
- self.assertEqual(self.network.rootpath, None)
- self.assertEqual(self.network.user_passwd, None)
- self.assertEqual(self.network.host_port, None)
-
- def test_setrooturl(self):
- self.network.setrooturl(sample_rooturl)
- self.assertEqual(self.network.rooturl, sample_rooturl)
- self.assertEqual(self.network.roottype, "http")
- self.assertEqual(self.network.rootpath, "/path")
- self.assertEqual(self.network.user_passwd, "user:passwd")
- self.assertEqual(self.network.host_port, "host:8080")
-
- def test_setrooturl_nopath(self):
- rooturl = "http://user:passwd@host:8080"
- self.network.setrooturl(rooturl)
- self.assertEqual(self.network.rooturl, rooturl)
- self.assertEqual(self.network.roottype, "http")
- self.assertEqual(self.network.rootpath, "/")
- self.assertEqual(self.network.user_passwd, "user:passwd")
- self.assertEqual(self.network.host_port, "host:8080")
-
- def test_findrooturl_notfound(self):
- # TODO: This test will fail if a file /tmp/@@Zope/Root exists :-(
- target = self.tempdir()
- self.assertEqual(self.network.findrooturl(target), None)
-
- def test_findrooturl_found(self):
- target = self.tempdir()
- zdir = join(target, "@@Zope")
- os.mkdir(zdir)
- rootfile = join(zdir, "Root")
- f = open(rootfile, "w")
- f.write(sample_rooturl + "\n")
- f.close()
- self.assertEqual(self.network.findrooturl(target), sample_rooturl)
-
- def test_saverooturl(self):
- self.network.setrooturl(sample_rooturl)
- target = self.tempdir()
- zdir = join(target, "@@Zope")
- os.mkdir(zdir)
- rootfile = join(zdir, "Root")
- self.network.saverooturl(target)
- f = open(rootfile, "r")
- data = f.read()
- f.close()
- self.assertEqual(data.strip(), sample_rooturl)
-
- def test_loadrooturl(self):
- target = self.tempdir()
- self.assertRaises(Error, self.network.loadrooturl, target)
- zdir = join(target, "@@Zope")
- os.mkdir(zdir)
- self.network.setrooturl(sample_rooturl)
- self.network.saverooturl(target)
- new = Network()
- new.loadrooturl(target)
- self.assertEqual(new.rooturl, sample_rooturl)
-
- def test_httpreq(self):
- ready = threading.Event()
- svr = DummyServer(ready)
- svr.start()
- ready.wait()
- try:
- self.network.setrooturl("http://%s:%s" % (HOST, PORT))
- self.assertRaises(Error, self.network.httpreq, "/xyzzy", "@@view")
- finally:
- svr.stop()
- svr.join()
-
- def test_slurptext_html(self):
- fp = StringIO("<p>This is some\n\ntext.</p>\n")
- result = self.network.slurptext(fp, {"Content-type": "text/html"})
- self.assertEqual(result, "This is some text.")
-
- def test_slurptext_plain(self):
- fp = StringIO("<p>This is some\n\ntext.</p>\n")
- result = self.network.slurptext(fp, {"Content-type": "text/plain"})
- self.assertEqual(result, "<p>This is some\n\ntext.</p>")
-
- def test_slurptext_nontext(self):
- fp = StringIO("<p>This is some\n\ntext.</p>\n")
- result = self.network.slurptext(fp, {"Content-type": "foo/bar"})
- self.assertEqual(result, "Content-type: foo/bar")
-
-def test_suite():
- loader = unittest.TestLoader()
- return loader.loadTestsFromTestCase(TestNetwork)
-
-def test_main():
- unittest.TextTestRunner().run(test_suite())
-
-if __name__=='__main__':
- test_main()
Deleted: zope.fssync/trunk/src/zope/fssync/tests/test_passwd.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/tests/test_passwd.py 2007-06-13 14:20:24 UTC (rev 76666)
+++ zope.fssync/trunk/src/zope/fssync/tests/test_passwd.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -1,184 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test the authentication token manager.
-
-$Id$
-"""
-import os
-import tempfile
-import unittest
-
-from zope.fssync import passwd
-
-
-class PasswordGetter(object):
- """PasswordManager.getPassword() replacement to use in the tests."""
-
- def __call__(self, user, host_port):
- self.user = user
- self.host_port = host_port
- return "mypassword"
-
-
-class TestPasswordManager(unittest.TestCase):
-
- def setUp(self):
- self.filename = tempfile.mktemp()
- self.pwmgr = passwd.PasswordManager(filename=self.filename)
- self.getter = PasswordGetter()
- self.pwmgr.getPassword = self.getter
-
- def tearDown(self):
- if os.path.exists(self.filename):
- os.unlink(self.filename)
-
- def create_file(self, include_comment=True):
- """Create the file with a single record."""
- f = open(self.filename, "w")
- if include_comment:
- print >>f, "# this is a comment"
- print >>f
- print >>f, "http", "example.com", "testuser", "faketoken"
- f.close()
-
- def read_file(self):
- """Return a list of non-blank, non-comment lines from the file."""
- f = open(self.filename)
- lines = f.readlines()
- f.close()
- return [line.split()
- for line in lines
- if line.strip()[:1] not in ("#", "")]
-
- # getToken()
-
- def test_hostport_normalization(self):
- token1 = self.pwmgr.getToken("http", "example.com", "testuser")
- token2 = self.pwmgr.getToken("http", "example.com:80", "testuser")
- self.assertEqual(token1, token2)
- self.assertEqual(self.getter.host_port, "example.com")
-
- def test_load_token_from_file(self):
- self.create_file()
- token = self.pwmgr.getToken("http", "example.com:80", "testuser")
- self.assertEqual(token, "faketoken")
- self.failIf(hasattr(self.getter, "user"))
- self.failIf(hasattr(self.getter, "host_post"))
-
- def test_load_token_missing_from_file(self):
- self.create_file()
- token = self.pwmgr.getToken("http", "example.com:80", "otheruser")
- self.assertNotEqual(token, "faketoken")
- self.assertEqual(self.getter.user, "otheruser")
- self.assertEqual(self.getter.host_port, "example.com")
-
- def test_diff_in_scheme(self):
- self.create_file()
- token = self.pwmgr.getToken("https", "example.com", "testuser")
- self.assertNotEqual(token, "faketoken")
-
- def test_diff_in_host(self):
- self.check_difference("http", "example.net", "testuser")
-
- def test_diff_in_port(self):
- self.check_difference("http", "example.com:9000", "testuser")
-
- def test_diff_in_username(self):
- self.check_difference("http", "example.com", "otheruser")
-
- def check_difference(self, scheme, host_port, username):
- self.create_file()
- token = self.pwmgr.getToken(scheme, host_port, username)
- self.assertNotEqual(token, "faketoken")
- self.assertEqual(self.getter.user, username)
- self.assertEqual(self.getter.host_port, host_port)
-
- # addToken()
-
- def test_add_token_to_new_file(self):
- self.pwmgr.addToken("http", "example.com:80", "testuser", "faketoken")
- records = self.read_file()
- self.assertEqual(len(records), 1)
- self.assertEqual(records[0],
- ["http", "example.com", "testuser", "faketoken"])
-
- def test_add_token_to_file(self):
- self.create_file()
- self.pwmgr.addToken("http", "example.com", "otheruser", "mytoken")
- records = self.read_file()
- records.sort()
- self.assertEqual(len(records), 2)
- self.assertEqual(records,
- [["http", "example.com", "otheruser", "mytoken"],
- ["http", "example.com", "testuser", "faketoken"],
- ])
-
- def test_replace_token_from_file(self):
- self.create_file()
- self.pwmgr.addToken("http", "example.com", "testuser", "newtoken")
- records = self.read_file()
- self.assertEqual(len(records), 1)
- self.assertEqual(records[0],
- ["http", "example.com", "testuser", "newtoken"])
-
- # removeToken()
-
- def test_remove_without_file(self):
- found = self.pwmgr.removeToken("http", "example.com", "someuser")
- self.assert_(not found)
-
- def test_remove_not_in_file(self):
- self.create_file()
- found = self.pwmgr.removeToken("http", "example.com", "someuser")
- self.assert_(not found)
- # file should not have been modified
- records = self.read_file()
- self.assertEqual(len(records), 1)
- self.assertEqual(records[0],
- ["http", "example.com", "testuser", "faketoken"])
-
- def test_remove_last_in_file_with_comment(self):
- self.create_file()
- found = self.pwmgr.removeToken("http", "example.com", "testuser")
- self.assert_(found)
- records = self.read_file()
- self.assertEqual(len(records), 0)
- # the file included a comment, so must not have been removed:
- self.assert_(os.path.exists(self.filename))
-
- def test_remove_last_in_file_without_comment(self):
- self.create_file(include_comment=False)
- found = self.pwmgr.removeToken("http", "example.com", "testuser")
- self.assert_(found)
- # the result should only include a blank line, so should be removed:
- self.assert_(not os.path.exists(self.filename))
-
- def test_remove_one_of_two(self):
- f = open(self.filename, "w")
- print >>f, "http", "example.com", "testuser", "faketoken"
- print >>f, "http", "example.com", "otheruser", "othertoken"
- f.close()
- found = self.pwmgr.removeToken("http", "example.com", "testuser")
- self.assert_(found)
- records = self.read_file()
- self.assertEqual(len(records), 1)
- self.assertEqual(records[0],
- ["http", "example.com", "otheruser", "othertoken"])
-
-
-def test_suite():
- return unittest.makeSuite(TestPasswordManager)
-
-if __name__ == "__main__":
- unittest.main(defaultTest="test_suite")
Added: zope.fssync/trunk/src/zope/fssync/tests/test_task.py
===================================================================
--- zope.fssync/trunk/src/zope/fssync/tests/test_task.py (rev 0)
+++ zope.fssync/trunk/src/zope/fssync/tests/test_task.py 2007-06-13 15:24:10 UTC (rev 76667)
@@ -0,0 +1,666 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Tests for the Commit task.
+
+TODO: This should be rewritten as doctest and moved
+to zope.fssync where it belongs.
+
+$Id: test_committer.py 72447 2007-02-08 07:48:58Z oestermeier $
+"""
+import os
+import shutil
+import tempfile
+import unittest
+
+import zope.component
+import zope.interface
+from zope.traversing.interfaces import TraversalError, IContainmentRoot
+from zope.traversing.interfaces import ITraversable, ITraverser
+from zope.xmlpickle import loads, dumps
+from zope.location import Location
+from zope.filerepresentation.interfaces import IFileFactory
+from zope.filerepresentation.interfaces import IDirectoryFactory
+
+from zope.fssync import fsutil
+from zope.fssync.tests.mockmetadata import MockMetadata
+from zope.fssync.tests.tempfiles import TempFiles
+
+from zope.component.testing import PlacelessSetup
+
+from zope.fssync import synchronizer
+from zope.fssync import interfaces
+from zope.fssync import repository
+from zope.fssync import pickle
+from zope.fssync import task
+
+def provideSynchronizer(klass, Synchronizer):
+ zope.component.provideUtility(Synchronizer, interfaces.ISynchronizerFactory,
+ name=synchronizer.dottedname(klass))
+
+class Sample(object):
+ pass
+
+class IPretendFile(zope.interface.Interface):
+ pass
+
+class PretendFile(object):
+ zope.interface.implements(IPretendFile)
+
+ data = ''
+ contentType = ''
+
+ def __init__(self, data, contentType):
+ self.data = data
+ self.contentType = contentType
+
+class IPretendContainer(zope.interface.Interface):
+ pass
+
+class PretendContainer(Location):
+ zope.interface.implements(IPretendContainer, ITraversable, ITraverser)
+
+ def __init__(self):
+ self.holding = {}
+
+ def __setitem__(self, name, value):
+ name = name.lower()
+ if name in self.holding:
+ raise KeyError
+ self.holding[name] = value
+ return name
+
+ def __delitem__(self, name):
+ name = name.lower()
+ del self.holding[name]
+
+ def __getitem__(self, name):
+ name = name.lower()
+ return self.holding[name]
+
+ def get(self, name):
+ name = name.lower()
+ return self.holding.get(name)
+
+ def __contains__(self, name):
+ name = name.lower()
+ return name in self.holding
+
+ def keys(self):
+ return self.holding.keys()
+
+ def items(self):
+ return self.holding.items()
+
+ def iteritems(self):
+ return self.holding.iteritems()
+
+ def traverse(self, name, furtherPath):
+ try:
+ return self[name]
+ except KeyError:
+ raise TraversalError
+
+PCname = PretendContainer.__module__ + "." + PretendContainer.__name__
+
+class PretendRootContainer(PretendContainer):
+ zope.interface.implements(IContainmentRoot)
+
+
+class TestBase(PlacelessSetup, TempFiles):
+
+ # Base class for test classes
+
+ def setUp(self):
+ super(TestBase, self).setUp()
+
+ # Set up serializer factory
+ zope.component.provideUtility(synchronizer.DefaultSynchronizer,
+ interfaces.ISynchronizerFactory)
+
+ zope.component.provideAdapter(pickle.XMLPickler)
+ zope.component.provideAdapter(pickle.XMLUnpickler)
+ zope.component.provideAdapter(pickle.PathPersistentIdGenerator)
+ zope.component.provideAdapter(pickle.PathPersistentLoader)
+
+ zope.component.provideUtility(synchronizer.FileGenerator())
+ zope.component.provideUtility(synchronizer.DirectoryGenerator())
+
+ # Set up temporary name administration
+ TempFiles.setUp(self)
+
+ def tearDown(self):
+ # Clean up temporary files and directories
+ TempFiles.tearDown(self)
+
+ PlacelessSetup.tearDown(self)
+
+
+@zope.component.adapter(IPretendContainer)
+@zope.interface.implementer(IFileFactory)
+def file_factory_maker(container):
+ def file_factory(name, content_type, data):
+ return PretendFile(data, content_type)
+ return file_factory
+
+@zope.component.adapter(IPretendContainer)
+@zope.interface.implementer(IDirectoryFactory)
+def directory_factory_maker(container):
+ def directory_factory(name):
+ return PretendContainer()
+ return directory_factory
+
+def sort(lst):
+ lst.sort()
+ return lst
+
+
+class TestTaskModule(TestBase):
+
+ def setUp(self):
+ super(TestTaskModule, self).setUp()
+ self.location = tempfile.mktemp()
+ os.mkdir(self.location)
+
+ def tearDown(self):
+ super(TestTaskModule, self).tearDown()
+ shutil.rmtree(self.location)
+
+ def test_getSynchronizer(self):
+ obj = Sample()
+ adapter = synchronizer.getSynchronizer(obj)
+ self.assertEqual(adapter.__class__, synchronizer.DefaultSynchronizer)
+
+class TestCommitClass(TestBase):
+
+ def create_committer(self):
+ filesystem = repository.FileSystemRepository()
+ return task.Commit(synchronizer.getSynchronizer, filesystem)
+
+ def test_set_item_without_serializer(self):
+ committer = self.create_committer()
+ container = {}
+ committer.setItem(container, "foo", 42)
+ self.assertEqual(container, {"foo": 42})
+
+ def test_set_item_new(self):
+ committer = self.create_committer()
+ container = PretendContainer()
+ committer.setItem(container, "foo", 42)
+ self.assertEqual(container.holding, {"foo": 42})
+
+ def test_set_item_replace(self):
+ provideSynchronizer(PretendContainer, synchronizer.DirectorySynchronizer)
+ committer = self.create_committer()
+ container = PretendContainer()
+ committer.setItem(container, "foo", 42)
+ committer.setItem(container, "foo", 24, replace=True)
+ self.assertEqual(container.holding, {"foo": 24})
+
+ def test_set_item_nonexisting(self):
+ provideSynchronizer(PretendContainer, synchronizer.DirectorySynchronizer)
+ committer = self.create_committer()
+ container = PretendContainer()
+ self.assertRaises(KeyError, committer.setItem,
+ container, "foo", 42, replace=True)
+
+ def create_object(self, *args, **kw):
+ # Helper for the create_object() tests.
+ filesystem = repository.FileSystemRepository()
+ c = task.Commit(synchronizer.getSynchronizer, filesystem)
+ c.createObject(*args, **kw)
+
+ def create_object_debug(self, *args, **kw):
+ # Helper for the create_object() tests.
+ filesystem = repository.FileSystemRepository()
+ c = task.Commit(synchronizer.getSynchronizer, filesystem)
+ c.debug = True
+ c.createObject(*args, **kw)
+
+
+ def test_create_object_extra(self):
+ class TestContainer(object):
+ # simulate AttrMapping
+ def __setitem__(self, name, value):
+ self.name = name
+ self.value = value
+ class TestRoot(object):
+ zope.interface.implements(IContainmentRoot, ITraverser)
+ def traverse(self, *args):
+ pass
+ fspath = tempfile.mktemp()
+ f = open(fspath, 'w')
+ f.write('<?xml version="1.0" encoding="utf-8" ?>')
+ f.write('<pickle> <string>text/plain</string> </pickle>')
+ f.close()
+ container = TestContainer()
+ name = "contentType"
+ root = TestRoot()
+ try:
+ self.create_object(container, name, {}, fspath) #, context=root)
+ finally:
+ os.remove(fspath)
+ self.assertEqual(container.name, name)
+ self.assertEqual(container.value, "text/plain")
+
+ def test_create_object_factory_file(self):
+ container = {}
+ entry = {"flag": "added", "factory": "__builtin__.dict"}
+ tfn = os.path.join(self.tempdir(), "foo")
+ data = {"hello": "world"}
+ self.writefile(dumps(data), tfn)
+ self.create_object_debug(container, "foo", entry, tfn)
+ self.assertEqual(container, {"foo": data})
+
+ def test_create_object_factory_directory(self):
+ provideSynchronizer(PretendContainer, synchronizer.DirectorySynchronizer)
+ container = {}
+ entry = {"flag": "added", "factory": PCname}
+ tfn = os.path.join(self.tempdir(), "foo")
+ os.mkdir(tfn)
+ self.create_object(container, "foo", entry, tfn)
+ self.assertEqual(container.keys(), ["foo"])
+ self.assertEqual(container["foo"].__class__, PretendContainer)
+
+ def test_create_object_default(self):
+ container = PretendRootContainer()
+ entry = {"flag": "added"}
+ data = ["hello", "world"]
+ tfn = os.path.join(self.tempdir(), "foo")
+ self.writefile(dumps(data), tfn, "wb")
+ self.create_object(container, "foo", entry, tfn)
+ self.assertEqual(container.items(), [("foo", ["hello", "world"])])
+
+ def test_create_object_ifilefactory(self):
+ zope.component.provideAdapter(file_factory_maker)
+ container = PretendContainer()
+ entry = {"flag": "added"}
+ data = "hello world"
+ tfn = os.path.join(self.tempdir(), "foo")
+ self.writefile(data, tfn, "wb")
+ self.create_object(container, "foo", entry, tfn)
+ self.assertEqual(container.holding["foo"].__class__, PretendFile)
+ self.assertEqual(container.holding["foo"].data, "hello world")
+
+ def test_create_object_idirectoryfactory(self):
+ zope.component.provideAdapter(directory_factory_maker)
+ container = PretendContainer()
+ entry = {"flag": "added"}
+ tfn = os.path.join(self.tempdir(), "foo")
+ os.mkdir(tfn)
+ self.create_object(container, "foo", entry, tfn)
+ self.assertEqual(container.holding["foo"].__class__, PretendContainer)
+
+
+class TestCheckClass(TestBase):
+
+ def setUp(self):
+ # Set up base class (PlacelessSetup and TempNames)
+ TestBase.setUp(self)
+
+ # Set up environment
+ provideSynchronizer(PretendContainer, synchronizer.DirectorySynchronizer)
+ #provideSynchronizer(dict, DictAdapter)
+ #zope.component.provideAdapter(file_factory_maker)
+ zope.component.provideAdapter(directory_factory_maker)
+
+ # Set up fixed part of object tree
+ self.parent = PretendContainer()
+ self.child = PretendContainer()
+ self.grandchild = PretendContainer()
+ self.parent["child"] = self.child
+ self.child["grandchild"] = self.grandchild
+ self.foo = ["hello", "world"]
+ self.child["foo"] = self.foo
+
+ # Set up fixed part of filesystem tree
+ self.parentdir = self.tempdir()
+ self.childdir = os.path.join(self.parentdir, "child")
+ os.mkdir(self.childdir)
+ self.foofile = os.path.join(self.childdir, "foo")
+ self.writefile(dumps(self.foo), self.foofile, "wb")
+ self.originalfoofile = fsutil.getoriginal(self.foofile)
+ self.writefile(dumps(self.foo), self.originalfoofile, "wb")
+ self.grandchilddir = os.path.join(self.childdir, "grandchild")
+ os.mkdir(self.grandchilddir)
+
+ # Set up metadata
+ self.metadata = MockMetadata()
+ self.getentry = self.metadata.getentry
+
+ # Set up fixed part of entries
+ self.parententry = self.getentry(self.parentdir)
+ self.parententry["path"] = "/parent"
+ self.childentry = self.getentry(self.childdir)
+ self.childentry["path"] = "/parent/child"
+ self.grandchildentry = self.getentry(self.grandchilddir)
+ self.grandchildentry["path"] = "/parent/child/grandchild"
+ self.fooentry = self.getentry(self.foofile)
+ self.fooentry["path"] = "/parent/child/foo"
+
+ # Set up check task
+ filesystem = repository.FileSystemRepository(metadata=self.metadata)
+ self.checker = task.Check(synchronizer.getSynchronizer, filesystem)
+
+ def check_errors(self, expected_errors):
+ # Helper to call the checker and assert a given set of errors
+ self.checker.check(self.parent, "", self.parentdir)
+ self.assertEqual(sort(self.checker.errors()), sort(expected_errors))
+
+ def check_no_errors(self):
+ # Helper to call the checker and assert there are no errors
+ self.check_errors([])
+
+ def test_vanilla(self):
+ # The vanilla situation should not be an error
+ self.check_no_errors()
+
+ def test_file_changed(self):
+ # Changing a file is okay
+ self.newfoo = self.foo + ["news"]
+ self.writefile(dumps(self.newfoo), self.foofile, "wb")
+ self.check_no_errors()
+
+ def test_file_type_changed(self):
+ # Changing a file's type is okay
+ self.newfoo = ("one", "two")
+ self.fooentry["type"] = "__builtin__.tuple"
+ self.writefile(dumps(self.newfoo), self.foofile, "wb")
+ self.check_no_errors()
+
+ def test_file_conflict(self):
+ # A real conflict is an error
+ newfoo = self.foo + ["news"]
+ self.writefile(dumps(newfoo), self.foofile, "wb")
+ self.foo.append("something else")
+ self.check_errors([self.foofile])
+
+ def test_file_sticky_conflict(self):
+ # A sticky conflict is an error
+ self.fooentry["conflict"] = 1
+ self.check_errors([self.foofile])
+
+ def test_file_added(self):
+ # Adding a file properly is okay
+ self.bar = ["this", "is", "bar"]
+ barfile = os.path.join(self.childdir, "bar")
+ self.writefile(dumps(self.bar), barfile, "wb")
+ barentry = self.getentry(barfile)
+ barentry["flag"] = "added"
+ self.check_no_errors()
+
+ def test_file_added_no_file(self):
+ # Flagging a non-existing file as added is an error
+ barfile = os.path.join(self.childdir, "bar")
+ barentry = self.getentry(barfile)
+ barentry["flag"] = "added"
+ self.check_errors([barfile])
+
+ def test_file_spurious(self):
+ # A spurious file (empty entry) is okay
+ bar = ["this", "is", "bar"]
+ barfile = os.path.join(self.childdir, "bar")
+ self.writefile(dumps(bar), barfile, "wb")
+ self.check_no_errors()
+
+ def test_file_added_no_flag(self):
+ # Adding a file without setting the "added" flag is an error
+ bar = ["this", "is", "bar"]
+ barfile = os.path.join(self.childdir, "bar")
+ self.writefile(dumps(bar), barfile, "wb")
+ barentry = self.getentry(barfile)
+ barentry["path"] = "/parent/child/bar"
+ self.check_errors([barfile])
+
+ def test_same_files_added_twice(self):
+ # Adding files in both places is ok as long as the files are the same
+ self.bar = ["this", "is", "bar"]
+ self.child["bar"] = self.bar
+ barfile = os.path.join(self.childdir, "bar")
+ self.writefile(dumps(self.bar), barfile, "wb")
+ barentry = self.getentry(barfile)
+ barentry["path"] = "/parent/child/bar"
+ self.check_no_errors()
+
+ def test_different_files_added_twice(self):
+ # Adding files in both places is an error if the files are different
+ bar = ["this", "is", "bar"]
+ self.child["bar"] = bar
+ barfile = os.path.join(self.childdir, "bar")
+ self.writefile(dumps(["something else"]), barfile, "wb")
+ barentry = self.getentry(barfile)
+ barentry["path"] = "/parent/child/bar"
+ self.check_errors([barfile])
+
+ def test_file_lost(self):
+ # Losing a file is an error
+ os.remove(self.foofile)
+ self.check_errors([self.foofile])
+
+ def test_file_unmodified_lost_originial(self):
+ # Losing the original file is ok as long as the file does not change
+ os.remove(self.originalfoofile)
+ self.check_no_errors()
+
+ def test_file_modified_lost_originial(self):
+ # Losing the original file is an error as soon as the file changes
+ os.remove(self.originalfoofile)
+ self.writefile('something changed', self.foofile, "wb")
+ self.check_errors([self.foofile])
+
+ def test_file_removed(self):
+ # Removing a file properly is okay
+ os.remove(self.foofile)
+ self.fooentry["flag"] = "removed"
+ self.check_no_errors()
+
+ def test_file_removed_conflict(self):
+ # Removing a file that was changed in the database is an error
+ os.remove(self.foofile)
+ self.fooentry["flag"] = "removed"
+ self.foo.append("news")
+ self.check_errors([self.foofile])
+
+ def test_file_removed_twice(self):
+ # Removing a file in both places is an error
+ os.remove(self.foofile)
+ self.fooentry["flag"] = "removed"
+ del self.child["foo"]
+ self.check_errors([self.foofile])
+
+ def test_file_removed_object(self):
+ # Removing the object should cause a conflict
+ del self.child["foo"]
+ self.check_errors([self.foofile])
+
+ def test_file_entry_cleared(self):
+ # Clearing out a file's entry is an error
+ self.fooentry.clear()
+ self.check_errors([self.foofile])
+
+ def test_dir_added(self):
+ # Adding a directory is okay
+ bardir = os.path.join(self.childdir, "bar")
+ os.mkdir(bardir)
+ barentry = self.getentry(bardir)
+ barentry["flag"] = "added"
+ self.check_no_errors()
+
+ def test_dir_spurious(self):
+ # A spurious directory is okay
+ bardir = os.path.join(self.childdir, "bar")
+ os.mkdir(bardir)
+ self.check_no_errors()
+
+ def test_dir_added_no_flag(self):
+ # Adding a directory without setting the "added" flag is an error
+ bardir = os.path.join(self.childdir, "bar")
+ os.mkdir(bardir)
+ barentry = self.getentry(bardir)
+ barentry["path"] = "/parent/child/bar"
+ self.check_errors([bardir])
+
+ def test_dir_lost(self):
+ # Losing a directory is an error
+ shutil.rmtree(self.grandchilddir)
+ self.check_errors([self.grandchilddir])
+
+ def test_dir_removed(self):
+ # Removing a directory properly is okay
+ shutil.rmtree(self.grandchilddir)
+ self.grandchildentry["flag"] = "removed"
+ self.check_no_errors()
+
+ def test_dir_entry_cleared(self):
+        # Clearing out a directory's entry is an error
+ self.grandchildentry.clear()
+ self.check_errors([self.grandchilddir])
+
+ def test_tree_added(self):
+ # Adding a subtree is okay
+ bardir = os.path.join(self.childdir, "bar")
+ os.mkdir(bardir)
+ barentry = self.getentry(bardir)
+ barentry["path"] = "/parent/child/bar"
+ barentry["flag"] = "added"
+ bazfile = os.path.join(bardir, "baz")
+ self.baz = ["baz"]
+ self.writefile(dumps(self.baz), bazfile, "wb")
+ bazentry = self.getentry(bazfile)
+ bazentry["flag"] = "added"
+ burpdir = os.path.join(bardir, "burp")
+ os.mkdir(burpdir)
+ burpentry = self.getentry(burpdir)
+ burpentry["flag"] = "added"
+ self.check_no_errors()
+
+ def test_tree_added_no_flag(self):
+ # Adding a subtree without flagging everything as added is an error
+ bardir = os.path.join(self.childdir, "bar")
+ os.mkdir(bardir)
+ barentry = self.getentry(bardir)
+ barentry["path"] = "/parent/child/bar"
+ barentry["flag"] = "added"
+ bazfile = os.path.join(bardir, "baz")
+ baz = ["baz"]
+ self.writefile(dumps(baz), bazfile, "wb")
+ bazentry = self.getentry(bazfile)
+ bazentry["path"] = "/parent/child/bar/baz"
+ burpdir = os.path.join(bardir, "burp")
+ os.mkdir(burpdir)
+ burpentry = self.getentry(burpdir)
+ burpentry["path"] = "/parent/child/bar/burp"
+ self.check_errors([bazfile, burpdir])
+
+ def test_tree_removed(self):
+ # Removing a subtree is okay
+ shutil.rmtree(self.childdir)
+ self.childentry["flag"] = "removed"
+ self.grandchildentry.clear()
+ self.fooentry.clear()
+ self.check_no_errors()
+
+    # TODO Extra and Annotations are not tested directly
+
+ # TODO Changing directories into files or vice versa is not tested
+
+
+
+class TestCheckAndCommit(TestCheckClass):
+
+ # This class extends all tests from TestCheckClass that call
+ # self.check_no_errors() to carry out the change and check on it.
+ # Yes, this means that all the tests that call check_errors() are
+ # repeated. Big deal. :-)
+
+ def __init__(self, name):
+ TestCheckClass.__init__(self, name)
+ self.name = name
+
+ def setUp(self):
+ TestCheckClass.setUp(self)
+ self.committer = task.Commit(synchronizer.getSynchronizer, self.checker.repository)
+
+ def check_no_errors(self):
+ TestCheckClass.check_no_errors(self)
+ self.committer.perform(self.parent, "", self.parentdir)
+ name = "verify" + self.name[4:]
+ method = getattr(self, name, None)
+ if method:
+ method()
+ else:
+ print "?", name
+
+ def verify_vanilla(self):
+ self.assertEqual(self.parent.keys(), ["child"])
+ self.assertEqual(self.parent["child"], self.child)
+ self.assertEqual(sort(self.child.keys()), ["foo", "grandchild"])
+ self.assertEqual(self.child["foo"], self.foo)
+ self.assertEqual(self.child["grandchild"], self.grandchild)
+ self.assertEqual(self.grandchild.keys(), [])
+
+ def verify_file_added(self):
+ self.assertEqual(self.child["bar"], self.bar)
+ self.assertEqual(sort(self.child.keys()), ["bar", "foo", "grandchild"])
+
+ def verify_file_changed(self):
+ self.assertEqual(self.child["foo"], self.newfoo)
+
+ def verify_file_removed(self):
+ self.assertEqual(self.child.keys(), ["grandchild"])
+
+ def verify_file_spurious(self):
+ self.verify_vanilla()
+
+ def verify_file_type_changed(self):
+ self.assertEqual(self.child["foo"], self.newfoo)
+
+ def verify_file_unmodified_lost_originial(self):
+ self.verify_vanilla()
+
+ def verify_same_files_added_twice(self):
+ self.assertEqual(self.child["bar"], self.bar)
+
+ def verify_dir_removed(self):
+ self.assertEqual(self.child.keys(), ["foo"])
+
+ def verify_dir_added(self):
+ self.assertEqual(sort(self.child.keys()), ["bar", "foo", "grandchild"])
+
+ def verify_dir_spurious(self):
+ self.verify_vanilla()
+
+ def verify_tree_added(self):
+ self.assertEqual(sort(self.child.keys()), ["bar", "foo", "grandchild"])
+ bar = self.child["bar"]
+ self.assertEqual(bar.__class__, PretendContainer)
+ baz = bar["baz"]
+ self.assertEqual(self.baz, baz)
+
+ def verify_tree_removed(self):
+ self.assertEqual(self.parent.keys(), [])
+
+
+def test_suite():
+ s = unittest.TestSuite()
+ s.addTest(unittest.makeSuite(TestTaskModule))
+ s.addTest(unittest.makeSuite(TestCommitClass))
+ s.addTest(unittest.makeSuite(TestCheckClass))
+ s.addTest(unittest.makeSuite(TestCheckAndCommit))
+ return s
+
+def test_main():
+ unittest.TextTestRunner().run(test_suite())
+
+if __name__=='__main__':
+ test_main()
More information about the Checkins
mailing list