# portage.git: pym/portage/cache/flat_list.py
from portage.cache import fs_template
from portage.cache import cache_errors
from portage import os
from portage import _encodings
from portage import _unicode_encode
import codecs
import errno
import stat
import sys

if sys.hexversion >= 0x3000000:
	long = int

# store the current key order *here*.
class database(fs_template.FsBased):

	autocommits = True

	# do not screw with this ordering. _eclasses_ needs to be last
	auxdbkey_order = ('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
		'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
		'KEYWORDS', 'IUSE', 'CDEPEND',
		'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')

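	# Layout note: each cache entry is a plain file named after its cpv (for
	# example, a hypothetical sys-apps/foo-1.0), holding one value per line in
	# exactly the order above.  The "_mtime_" value is never written into the
	# file; it is carried by the file's own mtime (see _getitem/_setitem).
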
	def __init__(self, label, auxdbkeys, **config):
		super(database, self).__init__(label, auxdbkeys, **config)
		self._base = os.path.join(self._base,
			self.label.lstrip(os.path.sep).rstrip(os.path.sep))

		if len(self._known_keys) > len(self.auxdbkey_order) + 2:
			raise Exception("fewer ordered keys than auxdbkeys")
		if not os.path.exists(self._base):
			self._ensure_dirs()

	def _getitem(self, cpv):
		d = {}
		try:
			myf = codecs.open(_unicode_encode(os.path.join(self._base, cpv),
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['repo.content'],
				errors='replace')
			# one value per line, in the order given by auxdbkey_order
			for k, v in zip(self.auxdbkey_order, myf):
				d[k] = v.rstrip("\n")
		except (OSError, IOError) as e:
			if errno.ENOENT == e.errno:
				raise KeyError(cpv)
			raise cache_errors.CacheCorruption(cpv, e)

		try:
			# the mtime of the cache file itself serves as the "_mtime_" value
			d["_mtime_"] = long(os.fstat(myf.fileno()).st_mtime)
		except OSError as e:
			myf.close()
			raise cache_errors.CacheCorruption(cpv, e)
		myf.close()
		return d

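	# A successful lookup yields a dict keyed by auxdbkey_order plus "_mtime_",
	# e.g. (values invented for illustration):
	#	{'DEPEND': '>=dev-lang/python-2.6', ..., 'DEFINED_PHASES': '',
	#	 '_mtime_': 1297299001}
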
	def _setitem(self, cpv, values):
		# write to a temporary file alongside the final location, then
		# rename it into place
		s = cpv.rfind("/")
		fp = os.path.join(self._base, cpv[:s],
			".update.%i.%s" % (os.getpid(), cpv[s+1:]))
		try:
			myf = codecs.open(_unicode_encode(fp,
				encoding=_encodings['fs'], errors='strict'),
				mode='w', encoding=_encodings['repo.content'],
				errors='backslashreplace')
		except (OSError, IOError) as e:
			if errno.ENOENT == e.errno:
				try:
					self._ensure_dirs(cpv)
					myf = codecs.open(_unicode_encode(fp,
						encoding=_encodings['fs'], errors='strict'),
						mode='w', encoding=_encodings['repo.content'],
						errors='backslashreplace')
				except (OSError, IOError) as e:
					raise cache_errors.CacheCorruption(cpv, e)
			else:
				raise cache_errors.CacheCorruption(cpv, e)

		for x in self.auxdbkey_order:
			myf.write(values.get(x, "") + "\n")

		myf.close()
		self._ensure_access(fp, mtime=values["_mtime_"])
		# update written.  now we move it.
		new_fp = os.path.join(self._base, cpv)
		try:
			os.rename(fp, new_fp)
		except (OSError, IOError) as e:
			os.remove(fp)
			raise cache_errors.CacheCorruption(cpv, e)

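	# For a hypothetical cpv sys-apps/foo-1.0 the temporary file is
	# ".update.<pid>.foo-1.0" inside the sys-apps/ cache directory; because the
	# temporary and final paths share a directory, the os.rename() above swaps
	# the entry into place in one step and readers never see a partial file.
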
	def _delitem(self, cpv):
		try:
			os.remove(os.path.join(self._base, cpv))
		except OSError as e:
			if errno.ENOENT == e.errno:
				raise KeyError(cpv)
			else:
				raise cache_errors.CacheCorruption(cpv, e)

	def __contains__(self, cpv):
		return os.path.exists(os.path.join(self._base, cpv))

	def __iter__(self):
		"""generator for walking the dir struct"""
		dirs = [self._base]
		len_base = len(self._base)
		while len(dirs):
			for l in os.listdir(dirs[0]):
				if l.endswith(".cpickle"):
					continue
				p = os.path.join(dirs[0], l)
				st = os.lstat(p)
				if stat.S_ISDIR(st.st_mode):
					dirs.append(p)
					continue
				# yield the cpv, i.e. the path relative to the cache root
				yield p[len_base+1:]
			dirs.pop(0)

	def commit(self):
		pass
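
# Usage sketch (illustrative; the label below is invented, and extra keyword
# arguments such as gid/perms are interpreted by fs_template.FsBased rather
# than by this module).  Reads and writes go through the mapping interface
# provided by the cache template base class:
#
#	cache = database('/usr/portage', database.auxdbkey_order)
#	values = dict((k, '') for k in database.auxdbkey_order)
#	values['_mtime_'] = 1297299001
#	cache['sys-apps/foo-1.0'] = values          # calls _setitem()
#	metadata = cache['sys-apps/foo-1.0']        # calls _getitem()
#	'sys-apps/foo-1.0' in cache                 # __contains__()
#	list(cache)                                 # __iter__() over all cpvs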