obsolete: import modules within mercurial/ without "from mercurial"
Steve Borho -
r17405:b0aad9fb stable
@@ -1,331 +1,331 @@
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 build new tools to reconcile conflicting rewriting actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23
24 24 Format
25 25 ------
26 26
27 27 Markers are stored in an append-only file stored in
28 28 '.hg/store/obsstore'.
29 29
30 30 The file starts with a version header:
31 31
32 32 - 1 unsigned byte: version number, starting at zero.
33 33
34 34
35 35 The header is followed by the markers. Each marker is made of:
36 36
37 37 - 1 unsigned byte: number of new changesets "N", can be zero.
38 38
39 39 - 1 unsigned 32-bit integer: metadata size "M" in bytes.
40 40
41 41 - 1 byte: a bit field. It is reserved for flags used in common
42 42 obsolete marker operations, to avoid repeated decoding of metadata
43 43 entries.
44 44
45 45 - 20 bytes: obsoleted changeset identifier.
46 46
47 47 - N*20 bytes: new changeset identifiers.
48 48
49 49 - M bytes: metadata as a sequence of nul-terminated strings. Each
50 50 string contains a key and a value, separated by a colon ':', without
51 51 additional encoding. Keys cannot contain '\0' or ':' and values
52 52 cannot contain '\0'.
53 53 """
54 54 import struct
55 from mercurial import util, base85
55 import util, base85
56 56 from i18n import _
57 57
58 58 # the obsolete feature is not mature enough to be enabled by default.
59 59 # you have to rely on a third party extension to enable this.
60 60 _enabled = False
61 61
62 62 _pack = struct.pack
63 63 _unpack = struct.unpack
64 64
69 69 # data used for parsing and writing
70 70 _fmversion = 0
71 71 _fmfixed = '>BIB20s'
72 72 _fmnode = '20s'
73 73 _fmfsize = struct.calcsize(_fmfixed)
74 74 _fnodesize = struct.calcsize(_fmnode)
75 75
76 76 def _readmarkers(data):
77 77 """Read and enumerate markers from raw data"""
78 78 off = 0
79 79 diskversion = _unpack('>B', data[off:off + 1])[0]
80 80 off += 1
81 81 if diskversion != _fmversion:
82 82 raise util.Abort(_('parsing obsolete marker: unknown version %r')
83 83 % diskversion)
84 84
85 85 # Loop on markers
86 86 l = len(data)
87 87 while off + _fmfsize <= l:
88 88 # read fixed part
89 89 cur = data[off:off + _fmfsize]
90 90 off += _fmfsize
91 91 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
92 92 # read replacement
93 93 sucs = ()
94 94 if nbsuc:
95 95 s = (_fnodesize * nbsuc)
96 96 cur = data[off:off + s]
97 97 sucs = _unpack(_fmnode * nbsuc, cur)
98 98 off += s
99 99 # read metadata
100 100 # (metadata will be decoded on demand)
101 101 metadata = data[off:off + mdsize]
102 102 if len(metadata) != mdsize:
103 103 raise util.Abort(_('parsing obsolete marker: metadata is too '
104 104 'short, %d bytes expected, got %d')
105 105 % (mdsize, len(metadata)))
106 106 off += mdsize
107 107 yield (pre, sucs, flags, metadata)
108 108
109 109 def encodemeta(meta):
110 110 """Return encoded metadata string to string mapping.
111 111
112 112 Assume no ':' in key and no '\0' in both key and value."""
113 113 for key, value in meta.iteritems():
114 114 if ':' in key or '\0' in key:
115 115 raise ValueError("':' and '\0' are forbidden in metadata keys")
116 116 if '\0' in value:
117 117 raise ValueError("'\0' is forbidden in metadata values")
118 118 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
119 119
120 120 def decodemeta(data):
121 121 """Return string to string dictionary from encoded version."""
122 122 d = {}
123 123 for l in data.split('\0'):
124 124 if l:
125 125 key, value = l.split(':')
126 126 d[key] = value
127 127 return d
128 128
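# --- Editor's illustrative sketch, not part of obsolete.py: the metadata
# --- encoding above round-trips as long as keys contain no ':' or '\0' and
# --- values contain no '\0'. The sample mapping is hypothetical.
_demometa = {'user': 'alice', 'date': '1345125421 -7200'}
assert decodemeta(encodemeta(_demometa)) == _demometa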
129 129 class marker(object):
130 130 """Wrap obsolete marker raw data"""
131 131
132 132 def __init__(self, repo, data):
133 133 # the repo argument will be used to create changectx in later version
134 134 self._repo = repo
135 135 self._data = data
136 136 self._decodedmeta = None
137 137
138 138 def precnode(self):
139 139 """Precursor changeset node identifier"""
140 140 return self._data[0]
141 141
142 142 def succnodes(self):
143 143 """List of successor changesets node identifiers"""
144 144 return self._data[1]
145 145
146 146 def metadata(self):
147 147 """Decoded metadata dictionary"""
148 148 if self._decodedmeta is None:
149 149 self._decodedmeta = decodemeta(self._data[3])
150 150 return self._decodedmeta
151 151
152 152 def date(self):
153 153 """Creation date as (unixtime, offset)"""
154 154 parts = self.metadata()['date'].split(' ')
155 155 return (float(parts[0]), int(parts[1]))
156 156
157 157 class obsstore(object):
158 158 """Store obsolete markers
159 159
160 160 Markers can be accessed with two mappings:
161 161 - precursors: old -> set(new)
162 162 - successors: new -> set(old)
163 163 """
164 164
165 165 def __init__(self, sopener):
166 166 self._all = []
167 167 # new markers to serialize
168 168 self.precursors = {}
169 169 self.successors = {}
170 170 self.sopener = sopener
171 171 data = sopener.tryread('obsstore')
172 172 if data:
173 173 self._load(_readmarkers(data))
174 174
175 175 def __iter__(self):
176 176 return iter(self._all)
177 177
178 178 def __nonzero__(self):
179 179 return bool(self._all)
180 180
181 181 def create(self, transaction, prec, succs=(), flag=0, metadata=None):
182 182 """obsolete: add a new obsolete marker
183 183
184 184 * ensuring it is hashable
185 185 * check mandatory metadata
186 186 * encode metadata
187 187 """
188 188 if metadata is None:
189 189 metadata = {}
190 190 if len(prec) != 20:
191 191 raise ValueError(prec)
192 192 for succ in succs:
193 193 if len(succ) != 20:
194 194 raise ValueError(succ)
195 195 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
196 196 self.add(transaction, [marker])
197 197
198 198 def add(self, transaction, markers):
199 199 """Add new markers to the store
200 200
201 201 Take care of filtering duplicates.
202 202 Return the number of new markers."""
203 203 if not _enabled:
204 204 raise util.Abort('obsolete feature is not enabled on this repo')
205 205 new = [m for m in markers if m not in self._all]
206 206 if new:
207 207 f = self.sopener('obsstore', 'ab')
208 208 try:
209 209 # Whether the file's current position is at the beginning or at
210 210 # the end after opening a file for appending is implementation
211 211 # defined. So we must seek to the end before calling tell(),
212 212 # or we may get a zero offset for non-zero sized files on
213 213 # some platforms (issue3543).
214 214 f.seek(0, 2) # os.SEEK_END
215 215 offset = f.tell()
216 216 transaction.add('obsstore', offset)
217 217 # offset == 0: new file - add the version header
218 218 for bytes in _encodemarkers(new, offset == 0):
219 219 f.write(bytes)
220 220 finally:
221 221 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
222 222 # call 'filecacheentry.refresh()' here
223 223 f.close()
224 224 self._load(new)
225 225 return len(new)
226 226
227 227 def mergemarkers(self, transaction, data):
228 228 markers = _readmarkers(data)
229 229 self.add(transaction, markers)
230 230
231 231 def _load(self, markers):
232 232 for mark in markers:
233 233 self._all.append(mark)
234 234 pre, sucs = mark[:2]
235 235 self.precursors.setdefault(pre, set()).add(mark)
236 236 for suc in sucs:
237 237 self.successors.setdefault(suc, set()).add(mark)
238 238
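# --- Editor's illustrative sketch, not part of obsolete.py: recording a rewrite
# --- with obsstore.create() and reading it back through the precursors and
# --- successors mappings. 'repo', 'oldnode' and 'newnode' are hypothetical
# --- (nodes must be 20-byte binary ids), and _enabled is assumed to have been
# --- turned on by an extension, otherwise add() aborts.
def _demorecordrewrite(repo, oldnode, newnode):
    lock = repo.lock()
    try:
        tr = repo.transaction('demo-obsolete')
        try:
            repo.obsstore.create(tr, oldnode, [newnode],
                                 metadata={'user': 'alice',
                                           'date': '1345125421 0'})
            tr.close()
        finally:
            tr.release()
    finally:
        lock.release()
    # the new marker is now reachable from both ends of the store
    assert repo.obsstore.precursors[oldnode]
    assert repo.obsstore.successors[newnode]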
239 239 def _encodemarkers(markers, addheader=False):
240 240 # Kept separate from flushmarkers(), it will be reused for
241 241 # markers exchange.
242 242 if addheader:
243 243 yield _pack('>B', _fmversion)
244 244 for marker in markers:
245 245 yield _encodeonemarker(marker)
246 246
247 247
248 248 def _encodeonemarker(marker):
249 249 pre, sucs, flags, metadata = marker
250 250 nbsuc = len(sucs)
251 251 format = _fmfixed + (_fmnode * nbsuc)
252 252 data = [nbsuc, len(metadata), flags, pre]
253 253 data.extend(sucs)
254 254 return _pack(format, *data) + metadata
255 255
256 256 # arbitrarily picked to fit into the 8K limit from HTTP servers
257 257 # you have to take into account:
258 258 # - the version header
259 259 # - the base85 encoding
260 260 _maxpayload = 5300
261 261
262 262 def listmarkers(repo):
263 263 """List markers over pushkey"""
264 264 if not repo.obsstore:
265 265 return {}
266 266 keys = {}
267 267 parts = []
268 268 currentlen = _maxpayload * 2 # ensure we create a new part
269 269 for marker in repo.obsstore:
270 270 nextdata = _encodeonemarker(marker)
271 271 if (len(nextdata) + currentlen > _maxpayload):
272 272 currentpart = []
273 273 currentlen = 0
274 274 parts.append(currentpart)
275 275 currentpart.append(nextdata)
276 276 currentlen += len(nextdata)
277 277 for idx, part in enumerate(reversed(parts)):
278 278 data = ''.join([_pack('>B', _fmversion)] + part)
279 279 keys['dump%i' % idx] = base85.b85encode(data)
280 280 return keys
281 281
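# --- Editor's illustrative sketch, not part of obsolete.py: consuming the
# --- 'dump*' parts produced by listmarkers() on the receiving side. 'localrepo'
# --- and 'remote' are hypothetical, and the pushkey namespace name 'obsolete'
# --- is an assumption.
def _demopullmarkers(localrepo, remote):
    lock = localrepo.lock()
    try:
        tr = localrepo.transaction('demo-pull-obsmarkers')
        try:
            for value in remote.listkeys('obsolete').itervalues():
                # each value is a base85-encoded chunk starting with the
                # version header, exactly what mergemarkers() expects
                localrepo.obsstore.mergemarkers(tr, base85.b85decode(value))
            tr.close()
        finally:
            tr.release()
    finally:
        lock.release()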
282 282 def pushmarker(repo, key, old, new):
283 283 """Push markers over pushkey"""
284 284 if not key.startswith('dump'):
285 285 repo.ui.warn(_('unknown key: %r') % key)
286 286 return 0
287 287 if old:
288 288 repo.ui.warn(_('unexpected old value for %r') % key)
289 289 return 0
290 290 data = base85.b85decode(new)
291 291 lock = repo.lock()
292 292 try:
293 293 tr = repo.transaction('pushkey: obsolete markers')
294 294 try:
295 295 repo.obsstore.mergemarkers(tr, data)
296 296 tr.close()
297 297 return 1
298 298 finally:
299 299 tr.release()
300 300 finally:
301 301 lock.release()
302 302
303 303 def allmarkers(repo):
304 304 """all obsolete markers known in a repository"""
305 305 for markerdata in repo.obsstore:
306 306 yield marker(repo, markerdata)
307 307
308 308 def precursormarkers(ctx):
309 309 """obsolete marker making this changeset obsolete"""
310 310 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
311 311 yield marker(ctx._repo, data)
312 312
313 313 def successormarkers(ctx):
314 314 """obsolete marker marking this changeset as a successors"""
315 315 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
316 316 yield marker(ctx._repo, data)
317 317
318 318 def anysuccessors(obsstore, node):
319 319 """Yield every successor of <node>
320 320
321 321 This is a linear yield, unsuitable for detecting split changesets."""
322 322 remaining = set([node])
323 323 seen = set(remaining)
324 324 while remaining:
325 325 current = remaining.pop()
326 326 yield current
327 327 for mark in obsstore.precursors.get(current, ()):
328 328 for suc in mark[1]:
329 329 if suc not in seen:
330 330 seen.add(suc)
331 331 remaining.add(suc)
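# --- Editor's illustrative sketch, not part of obsolete.py: using
# --- anysuccessors() to find the latest (not yet rewritten) successors of a
# --- node. 'repo' and 'node' are hypothetical, and split changesets are not
# --- handled, as the docstring above warns.
def _demolatestsuccessors(repo, node):
    store = repo.obsstore
    return [n for n in anysuccessors(store, node)
            if n not in store.precursors]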