obsolete: avoid 2-argument form of enumerate, which was new in Python 2.6
Augie Fackler
r22262:10880c8a default
@@ -1,930 +1,930 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides the old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.


The header is followed by the markers. Each marker is made of:

- 1 unsigned byte: number of new changesets "N", can be zero.

- 1 unsigned 32-bit integer: metadata size "M" in bytes.

- 1 byte: a bit field. It is reserved for flags used in common
  obsolete marker operations, to avoid repeated decoding of metadata
  entries.

- 20 bytes: obsoleted changeset identifier.

- N*20 bytes: new changeset identifiers.

- M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
  additional encoding. Keys cannot contain '\0' or ':' and values
  cannot contain '\0'.

"""
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)
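
# An illustrative sketch of the fixed-part layout defined above (the
# all-zero nodeid is a placeholder, not a real changeset):
#
#   >>> _fmfsize                           # 1 + 4 + 1 + 20 bytes
#   26
#   >>> raw = _pack(_fmfixed, 1, 0, 0, '\x00' * 20)
#   >>> _unpack(_fmfixed, raw)             # (nbsuc, mdsize, flags, prec)
#   (1, 0, 0, '\x00\x00...\x00')           # 20 zero bytes, truncated here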

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped version and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1

def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        meta = decodemeta(metadata)
        try:
            date = util.parsedate(decodemeta(metadata).pop('date', '0 0'))
        except util.Abort:
            date = (0., 0)
        parents = None
        if 'p2' in meta:
            parents = (meta.pop('p1', None), meta.pop('p2', None))
        elif 'p1' in meta:
            parents = (meta.pop('p1', None),)
        elif 'p0' in meta:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = encodemeta(meta)

        yield (pre, sucs, flags, metadata, date, parents)

def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # values may contain ':', so split on the first colon only
            key, value = l.split(':', 1)
            d[key] = value
    return d
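
# A round-trip sketch of the metadata encoding above (keys are sorted on
# encode; dict ordering on decode is arbitrary in Python 2):
#
#   >>> encodemeta({'user': 'alice', 'note': 'amended'})
#   'note:amended\x00user:alice'
#   >>> decodemeta('note:amended\x00user:alice')
#   {'note': 'amended', 'user': 'alice'}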

class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later
        # version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, sopener):
        # caches for various obsolescence related data
        self.caches = {}
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata),
                  date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # cache.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

def _encodemarkers(markers, addheader=False):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for marker in markers:
        yield _encodeonemarker(marker)


def _encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    metadata = decodemeta(metadata)
    metadata['date'] = '%d %i' % date
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
-        for i, p in enumerate(parents, 1):
-            metadata['p%i' % i] = node.hex(p)
+        for i, p in enumerate(parents):
+            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = encodemeta(metadata)
    nbsuc = len(sucs)
    format = _fmfixed + (_fmnode * nbsuc)
    data = [nbsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
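
# A sketch of the parent-key numbering above: a plain enumerate() starts at
# zero, so 'i + 1' produces the same 'p1', 'p2', ... keys that the 2-argument
# enumerate(parents, 1) produced, without requiring Python 2.6:
#
#   >>> parents = ('\x11' * 20, '\x22' * 20)   # two placeholder nodeids
#   >>> ['p%i' % (i + 1) for i, p in enumerate(parents)]
#   ['p1', 'p2']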

# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
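
# A sketch of the resulting mapping (assuming 'markers' is an iterable of
# marker tuples, e.g. an obsstore): each value is a base85-encoded blob,
# version header included, sized to fit the payload limit above; with two
# chunks the keys would be:
#
#   >>> sorted(_pushkeyescape(markers))
#   ['dump0', 'dump1']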

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for markerdata in repo.obsstore:
        yield marker(repo, markerdata)

def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
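
# A sketch of the transitive walk (assuming an obsstore holding markers
# A -> B and B -> C, with a, b, c their nodeids): every yielded node is also
# queued for further lookup, so the initial node and both successors appear:
#
#   >>> set(allsuccessors(obsstore, [a])) == set([a, b, c])
#   True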

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return all sets of successors of the initial node

    The successors set of a changeset A is a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset
    A (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.

    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is the set version of this stack used to check if a
    # node is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors sets of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]
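
# A sketch of the intended cache reuse (assuming 'repo' and an iterable
# 'nodes' of nodeids to inspect): sharing one dict across calls avoids
# recomputing successors sets of common history:
#
#   >>> cache = {}
#   >>> for n in nodes:
#   ...     sets = successorssets(repo, n, cache)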

def _knownrevs(repo, nodes):
    """yield revision numbers of known nodes passed in parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return ()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear cache if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if splits are
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
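
# A hedged usage sketch (assuming 'repo' plus two changectx objects 'old'
# and 'new', e.g. obtained from repo[rev]): record that 'old' was rewritten
# into 'new', with marker-specific metadata on the relation:
#
#   >>> createmarkers(repo, [(old, (new,), {'note': 'rebased'})])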