obsolete: add __eq__ and __hash__ to marker to make set() deduplication work
Augie Fackler
r20031:6c1adf20 default
@@ -1,824 +1,832 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete markers handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewriting operations, and help
building new tools to reconcile conflicting rewriting actions. To
facilitate conflict resolution, markers include various annotations
besides the old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by a changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers
  are stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the
  "divergence" case. If two independent operations rewrite the same
  changeset A into A' and A'', we have an error case: divergent
  rewriting. We can detect it because two markers will be created
  independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.


The header is followed by the markers. Each marker is made of:

- 1 unsigned byte: number of new changesets "N", can be zero.

- 1 unsigned 32-bit integer: metadata size "M" in bytes.

- 1 byte: a bit field. It is reserved for flags used in common
  obsolete marker operations, to avoid repeated decoding of metadata
  entries.

- 20 bytes: obsoleted changeset identifier.

- N*20 bytes: new changeset identifiers.

- M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
  additional encoding. Keys cannot contain '\0' or ':' and values
  cannot contain '\0'.
"""
import struct
import util, base85, node
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)
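
# An editorial sketch, not part of the original file: what the struct
# formats above amount to in bytes. '>BIB20s' packs the successor count
# "N", the metadata size "M", the flags byte and the 20-byte precursor
# node; each successor then adds one '20s' field:
#
#     >>> import struct
#     >>> struct.calcsize('>BIB20s')   # fixed part of one marker
#     26
#     >>> struct.calcsize('20s')       # one additional successor node
#     20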

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped versions and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1

def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)

def encodemeta(meta):
    """Return an encoded version of a string-to-string mapping.

    Assume no ':' in keys and no '\0' in either keys or values."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return a string-to-string dictionary from an encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # values may contain ':', so split on the first one only
            key, value = l.split(':', 1)
            d[key] = value
    return d
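
# An editorial illustration, not in the original file, of the intended
# round-trip between encodemeta and decodemeta:
#
#     >>> d = decodemeta(encodemeta({'user': 'alice', 'date': '0 0'}))
#     >>> sorted(d.items())
#     [('date', '0 0'), ('user', 'alice')]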

class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later versions
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data
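
    # The two methods above are what make set() deduplication work for
    # marker objects wrapping identical raw data (the point of this
    # changeset). An editorial doctest-style sketch, assuming `data` is a
    # raw marker tuple as yielded by _readmarkers; None stands in for the
    # (currently unused) repo argument:
    #
    #     >>> data = ('\x00' * 20, (), 0, '')
    #     >>> len(set([marker(None, data), marker(None, data)]))
    #     1
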
    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changeset node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        parts = self.metadata()['date'].split(' ')
        return (float(parts[0]), int(parts[1]))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursor edges of x)
    - successors[x] -> set(markers on successor edges of x)
    """

    def __init__(self, sopener):
        # caches for various obsolescence related computations
        self.caches = {}
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        return iter(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """add a new obsolete marker

        * ensure the marker is hashable
        * check mandatory metadata
        * encode metadata
        """
        if metadata is None:
            metadata = {}
        if 'date' not in metadata:
            metadata['date'] = "%d %d" % util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        self.add(transaction, [marker])

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening it for appending is implementation
                # defined. So we must seek to the end before calling tell(),
                # or we may get a zero offset for non-zero sized files on
                # some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        """Add markers parsed from a binary stream to the store"""
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

def _encodemarkers(markers, addheader=False):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for marker in markers:
        yield _encodeonemarker(marker)


def _encodeonemarker(marker):
    pre, sucs, flags, metadata = marker
    nbsuc = len(sucs)
    format = _fmfixed + (_fmnode * nbsuc)
    data = [nbsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
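
# An editorial round-trip sketch, not in the original file: encode one
# "prune" marker (no successors), prepend the version header, and parse
# it back with _readmarkers:
#
#     >>> prec = '\x11' * 20
#     >>> raw = _pack('>B', _fmversion) + _encodeonemarker((prec, (), 0, ''))
#     >>> list(_readmarkers(raw)) == [(prec, (), 0, '')]
#     True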

# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in repo.obsstore:
        nextdata = _encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def syncpush(repo, remote):
    """utility function to push obsolete markers to a remote

    Exists mostly to allow overriding for experimentation purposes"""
    if (_enabled and repo.obsstore and
        'obsolete' in remote.listkeys('namespaces')):
        rslts = []
        remotedata = repo.listkeys('obsolete')
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def syncpull(repo, remote, gettransaction):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` parameter is a function that returns the pull
    transaction, creating one if necessary. We return the transaction to
    inform the calling code that a new transaction has been created (when
    applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    tr = None
    if _enabled:
        repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
    return tr

def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for markerdata in repo.obsstore:
        yield marker(repo, markerdata)

def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield every node that is a successor of <nodes>, including the
    initial nodes themselves.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
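
# An editorial sketch, not in the original file, of the traversal above,
# using a stand-in object that only provides the `successors` mapping
# this function reads, with single-letter strings instead of 20-byte
# node ids:
#
#     >>> class fakestore(object):
#     ...     successors = {'A': [('A', ('B', 'C'), 0, '')]}
#     >>> sorted(allsuccessors(fakestore(), ['A']))
#     ['A', 'B', 'C']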

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent ->
    children or precursor -> successor relations. It is very similar to
    "descendant" but augmented with obsolescence information.

    Beware that possible obsolescence cycles may result in complex
    situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return all sets of successors of the initial node

    The successors sets of a changeset A are groups of revisions that
    succeed A. Each set succeeds A as a consistent whole, each revision
    being only a partial replacement. Successors sets contain non-obsolete
    changesets only.

    In most cases a changeset A has zero (changeset pruned) or a single
    successors set that contains a single successor (changeset A replaced
    by A').

    When a changeset is split, it results in a successors set containing
    more than a single element. Divergent rewriting will result in
    multiple successors sets.

    They are returned as a list of tuples containing all valid successors
    sets.

    Final successors unknown locally are considered plain prunes
    (obsoleted without successors).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the
    same time. The cache dictionary is updated in place. The caller is
    responsible for its life span. Code that makes multiple calls to
    `successorssets` *must* use this cache mechanism or suffer terrible
    performance."""

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # elements added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a
    # node is already stacked. This check is used to detect cycles and
    # prevent infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors sets of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors sets and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors sets contributed by each marker depend on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set. first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]
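
# An editorial sketch, not in the original file, of the shapes this
# function returns, using a minimal stand-in repository where node 'A'
# was split into ('B', 'C') by one marker and independently rewritten to
# 'D' by another (divergence), all successors being known locally:
#
#     >>> class fakerepo(object):
#     ...     class obsstore(object):
#     ...         successors = {'A': set([('A', ('B', 'C'), 0, ''),
#     ...                                 ('A', ('D',), 0, '')])}
#     ...     obsstore = obsstore()
#     ...     def __contains__(self, node):
#     ...         return node in 'BCD'
#     >>> sorted(successorssets(fakerepo(), 'A'))
#     [['B', 'C'], ['D']]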

def _knownrevs(repo, nodes):
    """yield revision numbers of known nodes passed in parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return ()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To keep things simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the
    cache clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for node in repo.obsstore.successors:
        rev = getrev(node)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    # get all possible bumped changesets
    tonode = repo.changelog.node
    publicnodes = (tonode(r) for r in repo.revs('public()'))
    successors = allsuccessors(repo.obsstore, publicnodes,
                               ignoreflags=bumpedfix)
    # revisions that are public or already obsolete don't count as bumped
    query = '%ld - obsolete() - public()'
    return set(repo.revs(query, _knownrevs(repo, successors)))

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
    `old` and `new` are changectx objects.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used except if specified otherwise in
    the metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'date' not in metadata:
        metadata['date'] = '%i %i' % util.makedate()
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for prec, sucs in relations:
            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()