4 Abstract bug repository data storage to easily support multiple backends.
12 from libbe.error import NotSupported
13 from libbe.util.tree import Tree
14 from libbe.util import InvalidObject
15 from libbe import TESTING
23 from libbe.util.utility import Dir
class ConnectionError (Exception):
    """Raised when the repository backing store cannot be opened.

    See ``_connect``, which raises this when ``open(self.repo)`` fails.
    """
class InvalidID (KeyError):
    """Raised when an entry id is not present in storage.

    ``Storage.get``'s docstring documents raising this when an id is
    missing and no default is supplied.
    """
class InvalidRevision (KeyError):
    """Raised for revision identifiers storage cannot resolve.

    See ``VersionedStorage.revision_id`` and ``_get``.
    """
class NotWriteable (NotSupported):
    """NotSupported error raised when writing to unwriteable storage."""
    def __init__(self, msg):
        # Tag the NotSupported error with the 'write' operation name.
        NotSupported.__init__(self, 'write', msg)
class NotReadable (NotSupported):
    """NotSupported error raised when reading from unreadable storage."""
    def __init__(self, msg):
        # Tag the NotSupported error with the 'read' operation name.
        NotSupported.__init__(self, 'read', msg)
class EmptyCommit(Exception):
    """Raised when a commit is requested but there are no changes to record.

    ``VersionedStorage.commit``'s docstring documents this being raised
    when ``allow_empty`` is False and the working tree is unchanged.
    """
    def __init__(self):
        # Restored: the `def __init__(self):` line was missing in this
        # view, leaving a bare statement at class level.  The message is
        # fixed; callers catch the type.
        Exception.__init__(self, 'No changes to commit')
def __init__(self, id, value=None, parent=None, children=None):
    # Entry: a Tree node carrying an id/value pair, the in-memory form of
    # a storage entry.
    # NOTE(review): several statements are elided in this view (presumably
    # the children==None branch and the self.id / self.value / self.parent
    # assignments) -- confirm against the full source.
    Tree.__init__(self, children)
    if self.parent != None:
        # NOTE(review): the statement run when a parent is supplied
        # (presumably registering this entry with it) is elided here.

# NOTE(review): the enclosing `def __str__(self):` line appears elided;
# this formats an Entry as '<Entry id: value>'.
    return '<Entry %s: %s>' % (self.id, self.value)
def __cmp__(self, other, local=False):
    """Python-2 comparison for Entry objects.

    Compares by id, then value, then parent, then children.  `local=True`
    is passed on recursive parent/child comparisons to stop the recursion
    from re-walking the whole tree.

    NOTE(review): this view elides some lines (likely an `other == None`
    guard, a `local == False` wrapper around the parent/child comparisons,
    and a final `return 0`) -- confirm nesting against the full source.
    """
    if cmp(self.id, other.id) != 0:
        return cmp(self.id, other.id)
    if cmp(self.value, other.value) != 0:
        return cmp(self.value, other.value)
    if self.parent == None:
        # Our parent is None: compare the (possibly non-None) parents by value.
        if cmp(self.parent, other.parent) != 0:
            return cmp(self.parent, other.parent)
    elif self.parent.__cmp__(other.parent, local=True) != 0:
        return self.parent.__cmp__(other.parent, local=True)
    for sc,oc in zip(self, other):
        # Pairwise child comparison (Entry iterates its children via Tree).
        if sc.__cmp__(oc, local=True) != 0:
            return sc.__cmp__(oc, local=True)
def _objects_to_ids(self):
    # Replace Entry-object references with plain ids so the graph can be
    # pickled without dragging in linked Entry instances (used by the
    # pickle.dump expressions in Storage._init/disconnect below).
    if self.parent != None:
        self.parent = self.parent.id
    for i,c in enumerate(self):
        # NOTE(review): the loop body (presumably `self[i] = c.id`) and a
        # trailing `return self` are elided in this view; callers use the
        # return value.
def _ids_to_objects(self, dict):
    # Inverse of _objects_to_ids: restore object references from ids using
    # `dict`, an id -> Entry mapping.  (The parameter name shadows the
    # builtin `dict`; kept as-is.)
    if self.parent != None:
        self.parent = dict[self.parent]
    for i,c in enumerate(self):
        # NOTE(review): the loop body and a trailing `return self` are
        # elided in this view; callers use the return value.
class Storage (object):
    """Abstract storage backend.

    This class declares all the methods required by a Storage
    interface.  This implementation just keeps the data in a
    dictionary and uses pickle for persistent storage.
    """
def __init__(self, repo, encoding='utf-8', options=None):
    # NOTE(review): the assignment of `repo` (self.repo = repo) and any
    # other attributes (e.g. a can_init flag read by init()) appear elided
    # in this view; later methods read self.repo and self.can_init.
    self.encoding = encoding
    self.options = options
    # Read/write permission is the AND of a user-set soft limit and a
    # backend-set hard limit (see is_readable/is_writeable).
    self.readable = True # soft limit (user choice)
    self._readable = True # hard limit (backend choice)
    self.writeable = True # soft limit (user choice)
    self._writeable = True # hard limit (backend choice)
    self.versioned = False
# NOTE(review): the enclosing `def __str__(self):` (and, below, a version
# method's def line) appear elided in this view.
    return '<%s %s>' % (self.__class__.__name__, id(self))

    """Return a version string for this backend."""
125 def is_readable(self):
126 return self.readable and self._readable
128 def is_writeable(self):
129 return self.writeable and self._writeable
132 """Create a new storage repository."""
133 if self.can_init == False:
134 raise NotSupported('init',
135 'Cannot initialize this repository format.')
136 if self.is_writeable() == False:
137 raise NotWriteable('Cannot initialize unwriteable storage.')
# NOTE(review): the enclosing `def _init(self):` line and the statement
# building `d` (presumably the id -> Entry dict seeded with the __ROOT__
# entry) are elided in this view -- confirm against the full source.
    f = open(self.repo, 'wb')
    root = Entry(id='__ROOT__')
    # Serialize with ids instead of object references; -1 = highest
    # available pickle protocol.
    pickle.dump(dict((k,v._objects_to_ids()) for k,v in d.items()), f, -1)
    # NOTE(review): the matching f.close() appears elided in this view.
148 """Remove the storage repository."""
149 if self.is_writeable() == False:
150 raise NotWriteable('Cannot destroy unwriteable storage.')
151 return self._destroy()
157 """Open a connection to the repository."""
158 if self.is_readable() == False:
159 raise NotReadable('Cannot connect to unreadable storage.')
164 f = open(self.repo, 'rb')
166 raise ConnectionError(self)
168 self._data = dict((k,v._ids_to_objects(d)) for k,v in d.items())
def disconnect(self):
    """Close the connection to the repository."""
    if self.is_writeable() == False:
        # NOTE(review): the branch body (presumably an early `return`, so
        # unwriteable storage is not flushed) is elided in this view.
    f = open(self.repo, 'wb')
    # Persist the working data, converting Entry references back to ids.
    pickle.dump(dict((k,v._objects_to_ids())
                     for k,v in self._data.items()), f, -1)
    # NOTE(review): f.close() and any clearing of self._data appear elided.
def add(self, *args, **kwargs):
    # Add an entry; a no-op when the id already exists (see the probe
    # below).  Args are forwarded to _add(id, parent=None).
    if self.is_writeable() == False:
        raise NotWriteable('Cannot add entry to unwriteable storage.')
    try: # Maybe we've already added that id?
        # NOTE(review): the probing statement (presumably a get of the id)
        # is elided in this view.
        pass # yup, no need to add another
    # NOTE(review): the `except ...:` line (presumably except InvalidID)
    # is elided in this view.
        self._add(*args, **kwargs)
def _add(self, id, parent=None):
    # NOTE(review): default-parent handling (presumably mapping None to
    # '__ROOT__') appears elided in this view; as shown, parent=None would
    # raise KeyError on the lookup below.
    p = self._data[parent]
    self._data[id] = Entry(id, parent=p)
def remove(self, *args, **kwargs):
    """Remove an entry."""
    # Same writeability gate as the other mutators; delegate to the
    # backend hook on success.
    writeable = self.is_writeable()
    if writeable == False:
        raise NotSupported(
            'write', 'Cannot remove entry from unwriteable storage.')
    self._remove(*args, **kwargs)
def _remove(self, id):
    # pop() without a default: a missing id raises KeyError here.
    e = self._data.pop(id)
    # NOTE(review): the detachment of `e` from its parent tree appears
    # elided in this view -- confirm against the full source.
def recursive_remove(self, *args, **kwargs):
    """Remove an entry and all its descendants."""
    # Writeability gate mirrors .remove(); delegate to the backend hook.
    writeable = self.is_writeable()
    if writeable == False:
        raise NotSupported(
            'write', 'Cannot remove entries from unwriteable storage.')
    self._recursive_remove(*args, **kwargs)
def _recursive_remove(self, id):
    # Remove every node in the entry's subtree.  The traversal is
    # interleaved with removal on purpose; kept as-is since reordering
    # could change behavior if traverse() is a live generator.
    for entry in self._data[id].traverse():
        self._remove(entry.id)
def children(self, *args, **kwargs):
    """Return a list of specified entry's children's ids."""
    # Readability gate, then delegate to the backend hook.
    readable = self.is_readable()
    if readable == False:
        raise NotReadable('Cannot list children with unreadable storage.')
    return self._children(*args, **kwargs)
def _children(self, id=None, revision=None):
    # NOTE(review): default-id handling (id=None presumably mapping to
    # '__ROOT__') appears elided; `revision` is unused by this
    # unversioned implementation.
    # Ids beginning with '__' (e.g. __ROOT__) are internal and hidden.
    return [c.id for c in self._data[id] if not c.id.startswith('__')]
def get(self, *args, **kwargs):
    """
    Get contents of and entry as they were in a given revision.
    revision==None specifies the current revision.

    If there is no id, return default, unless default is not
    given, in which case raise InvalidID.
    """
    if self.is_readable() == False:
        raise NotReadable('Cannot get entry with unreadable storage.')
    # `decode` is consumed here rather than forwarded to _get().
    if 'decode' in kwargs:
        decode = kwargs.pop('decode')
    # NOTE(review): the `else:` branch (presumably decode = False) is
    # elided in this view.
    value = self._get(*args, **kwargs)
    # NOTE(review): the guard choosing between the decoded return below
    # and returning the raw value (checking `decode` and the value's
    # type) is elided in this view.
        return unicode(value, self.encoding)
def _get(self, id, default=InvalidObject, revision=None):
    # InvalidObject is a sentinel meaning "no default supplied".
    # NOTE(review): the `if id in self._data:` guard and the final
    # branches (presumably `raise InvalidID(id)` / `return default`) are
    # elided in this view.
        return self._data[id].value
    elif default == InvalidObject:
def set(self, id, value, *args, **kwargs):
    """Set the entry contents."""
    if self.is_writeable() == False:
        raise NotWriteable('Cannot set entry in unwriteable storage.')
    if type(value) == types.UnicodeType:
        # Store encoded byte strings (Python 2) using the repo encoding.
        value = value.encode(self.encoding)
    self._set(id, value, *args, **kwargs)
def _set(self, id, value):
    if id not in self._data:
        # NOTE(review): the branch body (presumably raise InvalidID(id))
        # is elided in this view.
    self._data[id].value = value
class VersionedStorage (Storage):
    """Versioned storage backend.

    This class declares all the methods required by a Storage
    interface that supports versioning.  This implementation just
    keeps the data in a list and uses pickle for persistent
    storage.
    """
    name = 'VersionedStorage'
def __init__(self, *args, **kwargs):
    Storage.__init__(self, *args, **kwargs)
    self.versioned = True # override Storage's default of False
# NOTE(review): the enclosing `def _init(self):` line is elided in this view.
    f = open(self.repo, 'wb')
    # Seed the repository with the root entry plus the two reserved
    # commit-message entries (see _commit, which writes their .value).
    root = Entry(id='__ROOT__')
    summary = Entry(id='__COMMIT__SUMMARY__', value='Initial commit')
    body = Entry(id='__COMMIT__BODY__')
    initial_commit = {root.id:root, summary.id:summary, body.id:body}
    d = dict((k,v._objects_to_ids()) for k,v in initial_commit.items())
    pickle.dump([d, copy.deepcopy(d)], f, -1) # [inital tree, working tree]
    # NOTE(review): the matching f.close() appears elided in this view.
# NOTE(review): `def _connect(self):` and its try/except wrapper around
# open() are elided in this view; the raise converts an open() failure
# into ConnectionError.  `d` is presumably the unpickled list of trees.
    f = open(self.repo, 'rb')
    raise ConnectionError(self)
    # self._data is a list of revision trees with the mutable working
    # tree at index -1.  NOTE(review): the `for t in d]` continuation of
    # this list comprehension is elided in this view.
    self._data = [dict((k,v._ids_to_objects(t)) for k,v in t.items())
def disconnect(self):
    """Close the connection to the repository."""
    if self.is_writeable() == False:
        # NOTE(review): the branch body (presumably an early `return`) is
        # elided in this view.
    f = open(self.repo, 'wb')
    # Persist every revision tree, converting Entry references back to ids.
    pickle.dump([dict((k,v._objects_to_ids())
                      for k,v in t.items()) for t in self._data], f, -1)
    # NOTE(review): f.close() and any clearing of self._data appear elided.
def _add(self, id, parent=None):
    # Mutates only the working tree, self._data[-1].
    # NOTE(review): default-parent handling (presumably None ->
    # '__ROOT__') appears elided in this view.
    p = self._data[-1][parent]
    self._data[-1][id] = Entry(id, parent=p)
def _remove(self, id):
    # Working-tree removal; pop() without default raises KeyError if absent.
    e = self._data[-1].pop(id)
    # NOTE(review): detachment of `e` from its parent tree appears elided.
def _recursive_remove(self, id):
    # Remove every node in the working-tree entry's subtree; traversal is
    # deliberately interleaved with removal (see Storage._recursive_remove).
    for entry in self._data[-1][id].traverse():
        self._remove(entry.id)
def _children(self, id=None, revision=None):
    # NOTE(review): default handling is elided in this view (presumably
    # id=None -> '__ROOT__' and revision=None -> -1, the working tree).
    # Internal '__'-prefixed ids (__ROOT__, __COMMIT__*) are hidden.
    return [c.id for c in self._data[revision][id]
            if not c.id.startswith('__')]
def _get(self, id, default=InvalidObject, revision=None):
    # NOTE(review): the revision default (presumably None -> -1) and the
    # final branches (presumably `raise InvalidID(id)` / `return default`)
    # are elided in this view.
    if id in self._data[revision]:
        return self._data[revision][id].value
    elif default == InvalidObject:
def _set(self, id, value):
    if id not in self._data[-1]:
        # NOTE(review): the branch body (presumably raise InvalidID(id))
        # is elided in this view.
    self._data[-1][id].value = value
def commit(self, *args, **kwargs):
    """
    Commit the current repository, with a commit message string
    summary and body.  Return the name of the new revision.

    If allow_empty == False (the default), raise EmptyCommit if
    there are no changes to commit.
    """
    if self.is_writeable() == False:
        raise NotWriteable('Cannot commit to unwriteable storage.')
    return self._commit(*args, **kwargs)
def _commit(self, summary, body=None, allow_empty=False):
    """Record the working tree as a new revision and return its index.

    Raises EmptyCommit when the working tree equals the previous
    revision and allow_empty is False.
    """
    if self._data[-1] == self._data[-2] and allow_empty == False:
        # Restored: in this view the guard had no body, so empty commits
        # fell through silently instead of raising as commit() documents.
        raise EmptyCommit()
    # Stamp the commit message into the reserved entries (seeded by _init).
    self._data[-1]["__COMMIT__SUMMARY__"].value = summary
    self._data[-1]["__COMMIT__BODY__"].value = body
    rev = len(self._data)-1
    # Freeze the working tree as revision `rev`; its deep copy becomes the
    # new mutable working tree.
    self._data.append(copy.deepcopy(self._data[-1]))
    # Restored: commit() documents returning the new revision's name, but
    # this view computed `rev` and never returned it.
    return rev
def revision_id(self, index=None):
    """
    Return the name of the <index>th revision.  The choice of
    which branch to follow when crossing branches/merges is not
    defined.  Revision indices start at 1; ID 0 is the blank
    repository.

    Return None if index==None.

    If the specified revision does not exist, raise InvalidRevision.
    """
    if index == None:
        return None
    try:
        if int(index) != index:
            raise InvalidRevision(index) # reject non-integral numbers
    except ValueError:
        raise InvalidRevision(index) # reject non-numeric values
    L = len(self._data) - 1 # -1 b/c of initial commit
    if index >= -L and index <= L:
        return index % L
    # Fix: this raise previously used the undefined name `i`, producing a
    # NameError instead of the documented InvalidRevision.
    raise InvalidRevision(index)
class StorageTestCase (unittest.TestCase):
    """Test cases for base Storage class."""

    # NOTE(review): this view is elided -- a `Class = Storage` attribute
    # (self.Class is used in setUp below) and parts of setUp/tearDown
    # (the Dir() fixture creation, init/connect calls, cleanup) are not
    # visible; confirm against the full source.

    def __init__(self, *args, **kwargs):
        super(StorageTestCase, self).__init__(*args, **kwargs)

    # NOTE(review): `def setUp(self):` line elided; fragment follows.
        """Set up test fixtures for Storage test case."""
        super(StorageTestCase, self).setUp()
        # self.dir is presumably the Dir temp directory imported above.
        self.dirname = self.dir.path
        self.s = self.Class(repo=os.path.join(self.dirname, 'repo.pkl'))
        self.assert_failed_connect()

    # NOTE(review): `def tearDown(self):` line elided; fragment follows.
        super(StorageTestCase, self).tearDown()
        self.assert_failed_connect()

    def assert_failed_connect(self):
        # Connecting before .init() must raise ConnectionError.
        # NOTE(review): the try/connect/self.fail lines are partially
        # elided in this view.
            "Connected to %(name)s repository before initialising"
        except ConnectionError:
class Storage_init_TestCase (StorageTestCase):
    """Test cases for Storage.init method."""

    def test_connect_should_succeed_after_init(self):
        """Should connect after initialization."""
        # NOTE(review): the body (presumably self.s.init();
        # self.s.connect()) is elided in this view.
class Storage_add_remove_TestCase (StorageTestCase):
    """Test cases for Storage.add, .remove, and .recursive_remove methods."""

    def test_initially_empty(self):
        """New repository should be empty."""
        self.failUnless(len(self.s.children()) == 0, self.s.children())
def test_add_identical_rooted(self):
    """
    Adding entries with the same ID should not increase the number of children.
    """
    # Renamed from `test_add_rooted`: that name collided with the method
    # defined immediately below, so this test was silently shadowed by
    # Python's class-body semantics and never ran.
    for i in range(2): # add the same id more than once
        self.s.add('some id')
    s = sorted(self.s.children())
    self.failUnless(s == ['some id'], s)
def test_add_rooted(self):
    """
    Adding entries should increase the number of children (rooted).
    """
    # NOTE(review): the setup loop creating `ids` and adding each id is
    # elided in this view.
    s = sorted(self.s.children())
    self.failUnless(s == ids, '\n %s\n !=\n %s' % (s, ids))

def test_add_nonrooted(self):
    """
    Adding entries should increase the number of children (nonrooted).
    """
    # NOTE(review): creation of the 'parent' entry and the loop building
    # `ids` are elided in this view.
    self.s.add(ids[-1], 'parent')
    s = sorted(self.s.children('parent'))
    self.failUnless(s == ids, '\n %s\n !=\n %s' % (s, ids))
    s = self.s.children()
    self.failUnless(s == ['parent'], s)
def test_remove_rooted(self):
    """
    Removing entries should decrease the number of children (rooted).
    """
    # NOTE(review): the setup creating/adding `ids` and the removal loop
    # wrapper are elided in this view.
    self.s.remove(ids.pop())
    s = sorted(self.s.children())
    self.failUnless(s == ids, '\n %s\n !=\n %s' % (s, ids))

def test_remove_nonrooted(self):
    """
    Removing entries should decrease the number of children (nonrooted).
    """
    # NOTE(review): creation of the 'parent' entry, the `ids` setup loop,
    # and the removal loop wrapper are elided in this view.
    self.s.add(ids[-1], 'parent')
    self.s.remove(ids.pop())
    s = sorted(self.s.children('parent'))
    self.failUnless(s == ids, '\n %s\n !=\n %s' % (s, ids))
    s = self.s.children()
    self.failUnless(s == ['parent'], s)
def test_recursive_remove(self):
    """
    Recursive remove should empty the tree.
    """
    # NOTE(review): creation of the 'parent' entry and the outer loop
    # over `i`/`ids` are elided in this view.
    self.s.add(ids[-1], 'parent')
    for j in range(10): # add some grandkids
        self.s.add(str(20*i+j), ids[-i])
    self.s.recursive_remove('parent')
    s = sorted(self.s.children())
    self.failUnless(s == [], s)
class Storage_get_set_TestCase (StorageTestCase):
    """Test cases for Storage.get and .set methods."""

    # NOTE(review): a companion `id = ...` attribute appears elided in
    # this view; `self.id` is used by every test below.
    val = 'unlikely value'

    def test_get_default(self):
        """
        Get should return specified default if id not in Storage.
        """
        ret = self.s.get(self.id, default=self.val)
        self.failUnless(ret == self.val,
                "%s.get() returned %s not %s"
                % (vars(self.Class)['name'], ret, self.val))

    def test_get_default_exception(self):
        """
        Get should raise exception if id not in Storage and no default.
        """
        # NOTE(review): the try/self.fail(...)/except InvalidID wrapper is
        # partially elided in this view.
        ret = self.s.get(self.id)
                "%s.get() returned %s instead of raising InvalidID"
                % (vars(self.Class)['name'], ret))

    def test_get_initial_value(self):
        """
        Data value should be None before any value has been set.
        """
        # NOTE(review): the setup (presumably init/connect/add of self.id)
        # is elided in this view.
        ret = self.s.get(self.id)
        self.failUnless(ret == None,
                "%s.get() returned %s not None"
                % (vars(self.Class)['name'], ret))

    def test_set_exception(self):
        """
        Set should raise exception if id not in Storage.
        """
        # NOTE(review): the try/self.fail/except wrapper is partially
        # elided in this view.
        self.s.set(self.id, self.val)
                "%(name)s.set() did not raise InvalidID"

    # NOTE(review): the enclosing `def test_set(self):` line is elided.
        """
        Set should define the value returned by get.
        """
        self.s.set(self.id, self.val)
        ret = self.s.get(self.id)
        self.failUnless(ret == self.val,
                "%s.get() returned %s not %s"
                % (vars(self.Class)['name'], ret, self.val))

    def test_unicode_set(self):
        """
        Set should define the value returned by get.
        """
        # NOTE(review): the definition of the unicode `val` literal used
        # below is elided in this view.
        self.s.set(self.id, val)
        ret = self.s.get(self.id, decode=True)
        self.failUnless(type(ret) == types.UnicodeType,
                "%s.get() returned %s not UnicodeType"
                % (vars(self.Class)['name'], type(ret)))
        self.failUnless(ret == val,
                "%s.get() returned %s not %s"
                % (vars(self.Class)['name'], ret, self.val))
        # Without decode=True the raw encoded byte string comes back.
        ret = self.s.get(self.id)
        self.failUnless(type(ret) == types.StringType,
                "%s.get() returned %s not StringType"
                % (vars(self.Class)['name'], type(ret)))
        s = unicode(ret, self.s.encoding)
        self.failUnless(s == val,
                "%s.get() returned %s not %s"
                % (vars(self.Class)['name'], s, self.val))
class Storage_persistence_TestCase (StorageTestCase):
    """Test cases for Storage.disconnect and .connect methods."""

    # NOTE(review): a companion `id = ...` attribute appears elided in
    # this view.
    val = 'unlikely value'

    def test_get_set_persistence(self):
        """
        Set should define the value returned by get after reconnect.
        """
        # NOTE(review): the setup and the disconnect/connect cycle are
        # elided in this view.
        self.s.set(self.id, self.val)
        ret = self.s.get(self.id)
        self.failUnless(ret == self.val,
                "%s.get() returned %s not %s"
                % (vars(self.Class)['name'], ret, self.val))

    def test_add_nonrooted_persistence(self):
        """
        Adding entries should increase the number of children after reconnect.
        """
        # NOTE(review): the `ids` setup loop and the disconnect/connect
        # cycle are elided in this view.
        self.s.add(ids[-1], 'parent')
        s = sorted(self.s.children('parent'))
        self.failUnless(s == ids, '\n %s\n !=\n %s' % (s, ids))
        s = self.s.children()
        self.failUnless(s == ['parent'], s)
class VersionedStorageTestCase (StorageTestCase):
    """Test cases for base VersionedStorage class."""

    # The storage class under test; subclass factories override this.
    Class = VersionedStorage
class VersionedStorage_commit_TestCase (VersionedStorageTestCase):
    """Test cases for VersionedStorage methods."""

    # NOTE(review): these attribute literals look garbled in this view
    # (a leading character split off with the remainder commented out);
    # confirm the intended string literals against the full source.
    id = 'I' #unlikely id'
    commit_msg = 'C' #ommitting something interesting'
    commit_body = 'B' #ome\nlonger\ndescription\n'

    def test_revision_id_exception(self):
        """
        Invalid revision id should raise InvalidRevision.
        """
        # NOTE(review): the try/self.fail wrapper and the `pass` in the
        # except branch are partially elided in this view.
        rev = self.s.revision_id('highly unlikely revision id')
                "%s.revision_id() didn't raise InvalidRevision, returned %s."
                % (vars(self.Class)['name'], rev))
        except InvalidRevision:

    def test_empty_commit_raises_exception(self):
        """
        Empty commit should raise exception.
        """
        # NOTE(review): setup and the try/self.fail/except EmptyCommit
        # wrapper are elided in this view.
        self.s.commit(self.commit_msg, self.commit_body)
                "Empty %(name)s.commit() didn't raise EmptyCommit."

    def test_empty_commit_allowed(self):
        """
        Empty commit should _not_ raise exception if allow_empty=True.
        """
        # NOTE(review): the `allow_empty=True)` continuation is elided.
        self.s.commit(self.commit_msg, self.commit_body,

    def test_commit_revision_ids(self):
        """
        Commit / revision_id should agree on revision ids.
        """
        # NOTE(review): setup, the commit loop wrapper, the commit-call
        # continuation, and the positive-index loop header are elided.
        revs.append(self.s.commit(self.commit_msg,
        rev = self.s.revision_id(i+1)
        self.failUnless(rev == revs[i],
                "%s.revision_id(%d) returned %s not %s"
                % (vars(self.Class)['name'], i+1, rev, revs[i]))
        # Negative indices count back from the most recent revision.
        for i in range(-1, -9, -1):
            rev = self.s.revision_id(i)
            self.failUnless(rev == revs[i],
                    "%s.revision_id(%d) returned %s not %s"
                    % (vars(self.Class)['name'], i, rev, revs[i]))

    def test_get_previous_version(self):
        """
        Get should be able to return the previous version.
        """
        # NOTE(review): the enclosing `def val(i):` helper line, the setup,
        # and the loop wrappers are elided in this view.
            return '%s:%d' % (self.val, i+1)
        self.s.set(self.id, val(i))
        revs.append(self.s.commit('%s: %d' % (self.commit_msg, i),
        ret = self.s.get(self.id, revision=revs[i])
        self.failUnless(ret == val(i),
                "%s.get() returned %s not %s for revision %d"
                % (vars(self.Class)['name'], ret, val(i), revs[i]))
def make_storage_testcase_subclasses(storage_class, namespace):
    """Make StorageTestCase subclasses for storage_class in namespace.

    For every non-versioned StorageTestCase subclass in this module,
    synthesize a concrete subclass bound to `storage_class` and install
    it on `namespace` so unittest discovery picks it up.
    """
    # Restored: in this view the comprehension's `c for c in (` line was
    # missing, leaving `[` closed by `)` and `c` undefined.  The shape is
    # pinned by the surviving generator and `issubclass(c, ...)` filters.
    storage_testcase_classes = [
        c for c in (
            ob for ob in globals().values() if isinstance(ob, type))
        if issubclass(c, StorageTestCase) \
        and not issubclass(c, VersionedStorageTestCase)]

    for base_class in storage_testcase_classes:
        testcase_class_name = storage_class.__name__ + base_class.__name__
        testcase_class_bases = (base_class,)
        # Copy the base dict so each synthesized class gets its own Class.
        testcase_class_dict = dict(base_class.__dict__)
        testcase_class_dict['Class'] = storage_class
        testcase_class = type(
            testcase_class_name, testcase_class_bases, testcase_class_dict)
        setattr(namespace, testcase_class_name, testcase_class)
def make_versioned_storage_testcase_subclasses(storage_class, namespace):
    """Make VersionedStorageTestCase subclasses for storage_class in namespace.

    Like make_storage_testcase_subclasses, but includes the versioned
    test cases as well (no VersionedStorageTestCase exclusion).
    """
    # Restored: in this view the comprehension's `c for c in (` line was
    # missing, leaving `[` closed by `)` and `c` undefined.
    storage_testcase_classes = [
        c for c in (
            ob for ob in globals().values() if isinstance(ob, type))
        if issubclass(c, StorageTestCase)]

    for base_class in storage_testcase_classes:
        testcase_class_name = storage_class.__name__ + base_class.__name__
        testcase_class_bases = (base_class,)
        # Copy the base dict so each synthesized class gets its own Class.
        testcase_class_dict = dict(base_class.__dict__)
        testcase_class_dict['Class'] = storage_class
        testcase_class = type(
            testcase_class_name, testcase_class_bases, testcase_class_dict)
        setattr(namespace, testcase_class_name, testcase_class)
# Register VersionedStorage-backed variants of the generic test cases on
# this module, then build the module test suite (unit tests + doctests).
# NOTE(review): the file imports TESTING from libbe above, and this tail is
# presumably guarded by `if TESTING:` in the full source -- the guard is
# not visible in this view; confirm before running at import time.
make_storage_testcase_subclasses(VersionedStorage, sys.modules[__name__])

unitsuite =unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
suite = unittest.TestSuite([unitsuite, doctest.DocTestSuite()])