##// END OF EJS Templates
obsstore: add a __len__ method...
Pierre-Yves David -
r20585:f3c8db3d default
parent child Browse files
Show More
@@ -1,857 +1,860 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete markers handling
9 """Obsolete markers handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewriting operations, and help
17 transformations performed by history rewriting operations, and help
18 building new tools to reconciliate conflicting rewriting actions. To
18 building new tools to reconciliate conflicting rewriting actions. To
19 facilitate conflicts resolution, markers include various annotations
19 facilitate conflicts resolution, markers include various annotations
20 besides old and news changeset identifiers, such as creation date or
20 besides old and news changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called "precursor" and possible replacements are
23 The old obsoleted changeset is called "precursor" and possible replacements are
24 called "successors". Markers that used changeset X as a precursors are called
24 called "successors". Markers that used changeset X as a precursors are called
25 "successor markers of X" because they hold information about the successors of
25 "successor markers of X" because they hold information about the successors of
26 X. Markers that use changeset Y as a successors are call "precursor markers of
26 X. Markers that use changeset Y as a successors are call "precursor markers of
27 Y" because they hold information about the precursors of Y.
27 Y" because they hold information about the precursors of Y.
28
28
29 Examples:
29 Examples:
30
30
31 - When changeset A is replaced by a changeset A', one marker is stored:
31 - When changeset A is replaced by a changeset A', one marker is stored:
32
32
33 (A, (A'))
33 (A, (A'))
34
34
35 - When changesets A and B are folded into a new changeset C two markers are
35 - When changesets A and B are folded into a new changeset C two markers are
36 stored:
36 stored:
37
37
38 (A, (C,)) and (B, (C,))
38 (A, (C,)) and (B, (C,))
39
39
40 - When changeset A is simply "pruned" from the graph, a marker is created:
40 - When changeset A is simply "pruned" from the graph, a marker is created:
41
41
42 (A, ())
42 (A, ())
43
43
44 - When changeset A is split into B and C, a single marker are used:
44 - When changeset A is split into B and C, a single marker are used:
45
45
46 (A, (C, C))
46 (A, (C, C))
47
47
48 We use a single marker to distinct the "split" case from the "divergence"
48 We use a single marker to distinct the "split" case from the "divergence"
49 case. If two independents operation rewrite the same changeset A in to A' and
49 case. If two independents operation rewrite the same changeset A in to A' and
50 A'' when have an error case: divergent rewriting. We can detect it because
50 A'' when have an error case: divergent rewriting. We can detect it because
51 two markers will be created independently:
51 two markers will be created independently:
52
52
53 (A, (B,)) and (A, (C,))
53 (A, (B,)) and (A, (C,))
54
54
55 Format
55 Format
56 ------
56 ------
57
57
58 Markers are stored in an append-only file stored in
58 Markers are stored in an append-only file stored in
59 '.hg/store/obsstore'.
59 '.hg/store/obsstore'.
60
60
61 The file starts with a version header:
61 The file starts with a version header:
62
62
63 - 1 unsigned byte: version number, starting at zero.
63 - 1 unsigned byte: version number, starting at zero.
64
64
65
65
66 The header is followed by the markers. Each marker is made of:
66 The header is followed by the markers. Each marker is made of:
67
67
68 - 1 unsigned byte: number of new changesets "R", could be zero.
68 - 1 unsigned byte: number of new changesets "R", could be zero.
69
69
70 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
70 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
71
71
72 - 1 byte: a bit field. It is reserved for flags used in obsolete
72 - 1 byte: a bit field. It is reserved for flags used in obsolete
73 markers common operations, to avoid repeated decoding of metadata
73 markers common operations, to avoid repeated decoding of metadata
74 entries.
74 entries.
75
75
76 - 20 bytes: obsoleted changeset identifier.
76 - 20 bytes: obsoleted changeset identifier.
77
77
78 - N*20 bytes: new changesets identifiers.
78 - N*20 bytes: new changesets identifiers.
79
79
80 - M bytes: metadata as a sequence of nul-terminated strings. Each
80 - M bytes: metadata as a sequence of nul-terminated strings. Each
81 string contains a key and a value, separated by a colon ':', without
81 string contains a key and a value, separated by a colon ':', without
82 additional encoding. Keys cannot contain '\0' or ':' and values
82 additional encoding. Keys cannot contain '\0' or ':' and values
83 cannot contain '\0'.
83 cannot contain '\0'.
84 """
84 """
85 import struct
85 import struct
86 import util, base85, node
86 import util, base85, node
87 import phases
87 import phases
88 from i18n import _
88 from i18n import _
89
89
90 _pack = struct.pack
90 _pack = struct.pack
91 _unpack = struct.unpack
91 _unpack = struct.unpack
92
92
93 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
93 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
94
94
95 # the obsolete feature is not mature enough to be enabled by default.
95 # the obsolete feature is not mature enough to be enabled by default.
96 # you have to rely on a third party extension to enable this.
96 # you have to rely on a third party extension to enable this.
97 _enabled = False
97 _enabled = False
98
98
99 # data used for parsing and writing
99 # data used for parsing and writing
100 _fmversion = 0
100 _fmversion = 0
101 _fmfixed = '>BIB20s'
101 _fmfixed = '>BIB20s'
102 _fmnode = '20s'
102 _fmnode = '20s'
103 _fmfsize = struct.calcsize(_fmfixed)
103 _fmfsize = struct.calcsize(_fmfixed)
104 _fnodesize = struct.calcsize(_fmnode)
104 _fnodesize = struct.calcsize(_fmnode)
105
105
106 ### obsolescence marker flag
106 ### obsolescence marker flag
107
107
108 ## bumpedfix flag
108 ## bumpedfix flag
109 #
109 #
110 # When a changeset A' succeed to a changeset A which became public, we call A'
110 # When a changeset A' succeed to a changeset A which became public, we call A'
111 # "bumped" because it's a successors of a public changesets
111 # "bumped" because it's a successors of a public changesets
112 #
112 #
113 # o A' (bumped)
113 # o A' (bumped)
114 # |`:
114 # |`:
115 # | o A
115 # | o A
116 # |/
116 # |/
117 # o Z
117 # o Z
118 #
118 #
119 # The way to solve this situation is to create a new changeset Ad as children
119 # The way to solve this situation is to create a new changeset Ad as children
120 # of A. This changeset have the same content than A'. So the diff from A to A'
120 # of A. This changeset have the same content than A'. So the diff from A to A'
121 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
121 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
122 #
122 #
123 # o Ad
123 # o Ad
124 # |`:
124 # |`:
125 # | x A'
125 # | x A'
126 # |'|
126 # |'|
127 # o | A
127 # o | A
128 # |/
128 # |/
129 # o Z
129 # o Z
130 #
130 #
131 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
131 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
132 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
132 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
133 # This flag mean that the successors express the changes between the public and
133 # This flag mean that the successors express the changes between the public and
134 # bumped version and fix the situation, breaking the transitivity of
134 # bumped version and fix the situation, breaking the transitivity of
135 # "bumped" here.
135 # "bumped" here.
136 bumpedfix = 1
136 bumpedfix = 1
137
137
def _readmarkers(data):
    """Read and enumerate markers from raw data.

    ``data`` is the full content of an obsstore file: a one-byte version
    header followed by a sequence of binary markers.  Yields one
    ``(precursor, successors, flags, rawmetadata)`` tuple per marker.
    """
    offset = 0
    diskversion = _unpack('>B', data[offset:offset + 1])[0]
    offset += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # walk the markers; stop as soon as a full fixed-size header no
    # longer fits in the remaining data
    total = len(data)
    while offset + _fmfsize <= total:
        # fixed-size part: successor count, metadata size, flags, precursor
        fixed = data[offset:offset + _fmfsize]
        offset += _fmfsize
        numsuc, mdsize, flags, pre = _unpack(_fmfixed, fixed)
        # variable part: successor node identifiers (may be absent)
        sucs = ()
        if numsuc:
            sucslen = _fnodesize * numsuc
            sucs = _unpack(_fmnode * numsuc, data[offset:offset + sucslen])
            offset += sucslen
        # metadata is kept raw here and decoded on demand by callers
        metadata = data[offset:offset + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        offset += mdsize
        yield (pre, sucs, flags, metadata)
170
170
def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value.

    Keys are sorted so that the encoding of a given mapping is
    deterministic.  Raises ValueError when a key contains ':' or when a
    key or value contains a NUL byte.
    """
    # .items() instead of .iteritems(): identical on Python 2 and keeps
    # the function usable on Python 3 as well
    for key, value in meta.items():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\\0' are forbidden in metadata key")
        if '\0' in value:
            # this check is for NUL bytes; the previous message wrongly
            # blamed ':' (which *is* allowed in values)
            raise ValueError("'\\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
181
181
def decodemeta(data):
    """Return string to string dictionary from encoded version.

    Inverse of ``encodemeta``: entries are NUL-separated ``key:value``
    strings.  Empty entries (e.g. from empty input) are skipped.
    """
    d = {}
    for l in data.split('\0'):
        if l:
            # split on the first ':' only: keys cannot contain ':' but
            # values can, so a plain split(':') would raise ValueError
            # on any value containing a colon
            key, value = l.split(':', 1)
            d[key] = value
    return d
190
190
class marker(object):
    """Wrap obsolete marker raw data.

    ``data`` is a ``(precursor, successors, flags, rawmetadata)`` tuple as
    produced by ``_readmarkers``; metadata is decoded lazily.
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None  # cache for decoded metadata

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # markers of a different (sub)class never compare equal
        return type(self) == type(other) and self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary (computed once, then cached)"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        fields = self.metadata()['date'].split(' ')
        return (float(fields[0]), int(fields[1]))
226
226
class obsstore(object):
    """Store obsolete markers.

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    """

    def __init__(self, sopener):
        # caches for various obsolescence related cache
        self.caches = {}
        # every marker known, in load order
        self._all = []
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        raw = sopener.tryread('obsstore')
        if raw:
            self._load(_readmarkers(raw))

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if 'date' not in metadata:
            metadata['date'] = "%d %d" % util.makedate()
        # node identifiers must be full 20-byte binary nodes
        if len(prec) != 20:
            raise ValueError(prec)
        for snode in succs:
            if len(snode) != 20:
                raise ValueError(snode)
        newmarker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        return bool(self.add(transaction, [newmarker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        known = set(self._all)
        fresh = []
        for mark in markers:
            if mark not in known:
                known.add(mark)
                fresh.append(mark)
        if fresh:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the begin or at
                # the end after opening a file for appending is implementation
                # defined. So we must seek to the end before calling tell(),
                # or we may get a zero offset for non-zero sized files on
                # some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for chunk in _encodemarkers(fresh, offset == 0):
                    f.write(chunk)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(fresh)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        return len(fresh)

    def mergemarkers(self, transaction, data):
        # decode raw on-the-wire data and add the resulting markers
        self.add(transaction, _readmarkers(data))

    def _load(self, markers):
        # index every marker by precursor and by successor
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))
328
331
def _encodemarkers(markers, addheader=False):
    """Yield the binary encoding of ``markers``, one chunk per marker.

    When ``addheader`` is true, a one-byte version header is yielded
    first (used when writing a brand new obsstore file).
    """
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for m in markers:
        yield _encodeonemarker(m)
336
339
337
340
def _encodeonemarker(marker):
    """Return the binary encoding of a single marker tuple."""
    pre, sucs, flags, metadata = marker
    numsuc = len(sucs)
    # fixed header followed by one node field per successor
    format = _fmfixed + (_fmnode * numsuc)
    fields = [numsuc, len(metadata), flags, pre]
    fields.extend(sucs)
    return _pack(format, *fields) + metadata
345
348
346 # arbitrary picked to fit into 8K limit from HTTP server
349 # arbitrary picked to fit into 8K limit from HTTP server
347 # you have to take in account:
350 # you have to take in account:
348 # - the version header
351 # - the version header
349 # - the base85 encoding
352 # - the base85 encoding
350 _maxpayload = 5300
353 _maxpayload = 5300
351
354
def listmarkers(repo):
    """List markers over pushkey.

    Markers are encoded, grouped into parts no larger than ``_maxpayload``
    bytes, and each part is base85-encoded under a ``dump%i`` key.
    """
    if not repo.obsstore:
        return {}
    keys = {}
    parts = []
    # start above the limit so the first marker opens a fresh part
    size = _maxpayload * 2
    for marker in repo.obsstore:
        encoded = _encodeonemarker(marker)
        if len(encoded) + size > _maxpayload:
            bucket = []
            size = 0
            parts.append(bucket)
        bucket.append(encoded)
        size += len(encoded)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
371
374
def pushmarker(repo, key, old, new):
    """Push markers over pushkey.

    ``new`` is a base85-encoded blob of markers produced by
    ``listmarkers``.  Returns 1 on success, 0 when the key is unknown or
    an old value was unexpectedly supplied.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        # the message needs a %s placeholder: the previous string had
        # none, so the '% key' application raised TypeError instead of
        # printing the warning
        repo.ui.warn(_('unexpected old value for %s') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()
392
395
def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for rawdata in repo.obsstore:
        yield marker(repo, rawdata)
397
400
def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successors"""
    store = ctx._repo.obsstore
    for rawdata in store.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, rawdata)
402
405
def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    store = ctx._repo.obsstore
    for rawdata in store.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, rawdata)
407
410
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    seen = set(pending)
    while pending:
        current = pending.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            if mark[2] & ignoreflags:
                # marker carries one of the flags the caller asked to skip
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    pending.add(suc)
428
431
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    seen = set(pending)
    while pending:
        current = pending.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            if mark[2] & ignoreflags:
                # marker carries one of the flags the caller asked to skip
                continue
            precursor = mark[0]
            if precursor not in seen:
                seen.add(precursor)
                pending.add(precursor)
450
453
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    fg = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nodemap = repo.changelog.nodemap
        prevsize = -1
        # iterate to a fixed point: alternately close the set under
        # "descendant of" and "successor of" until it stops growing
        while len(fg) != prevsize:
            prevsize = len(fg)
            candidates = set(c.node() for c in fg)
            mutable = [c.node() for c in fg if c.mutable()]
            candidates.update(allsuccessors(repo.obsstore, mutable))
            # drop successors unknown locally before feeding the revset
            known = (n for n in candidates if n in nodemap)
            fg = set(repo.set('%ln::', known))
    return set(c.node() for c in fg)
476
479
477
480
def successorssets(repo, initialnode, cache=None):
    """Return all set of successors of initial nodes

    The successors set of a changeset A are a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Not that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its live
    spawn. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performances.

    """

    # mapping: precursor node -> markers using that node as precursor
    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successors of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrary set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            # NOTE: mark[1] is the tuple of successor nodes recorded by the
            # marker (as used below); markers sort deterministically so the
            # output order is stable.
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
683
686
684 def _knownrevs(repo, nodes):
687 def _knownrevs(repo, nodes):
685 """yield revision numbers of known nodes passed in parameters
688 """yield revision numbers of known nodes passed in parameters
686
689
687 Unknown revisions are silently ignored."""
690 Unknown revisions are silently ignored."""
688 torev = repo.changelog.nodemap.get
691 torev = repo.changelog.nodemap.get
689 for n in nodes:
692 for n in nodes:
690 rev = torev(n)
693 rev = torev(n)
691 if rev is not None:
694 if rev is not None:
692 yield rev
695 yield rev
693
696
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def register(computefunc):
        # each set name must be claimed by exactly one compute function
        assert name not in cachefuncs
        cachefuncs[name] = computefunc
        return computefunc
    return register
703
706
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    unfi = repo.unfiltered()
    store = unfi.obsstore
    if not store:
        # no obsstore data at all: every obsolescence set is empty
        return ()
    try:
        return store.caches[name]
    except KeyError:
        store.caches[name] = cachefuncs[name](unfi)
        return store.caches[name]
714
717
# To be simple we need to invalidate obsolescence cache when:
#
# - new changeset is added:
# - public phase is changed
# - obsolescence marker are added
# - strip is used a repo
def clearobscaches(repo):
    """Drop every obsolescence-related cache attached to `repo`.

    Caches are cleared only when obsstore data already exists on the
    repo; otherwise there is nothing to clear.

    (We could be smarter here given the exact event that triggered the
    cache clearing.)"""
    # only touch repo.obsstore when obsstore data exists in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
732
735
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    torev = repo.changelog.nodemap.get
    phase = repo._phasecache.phase
    result = set()
    # every precursor recorded in a marker is obsolete, provided it is
    # known locally and its phase is truthy (i.e. not public)
    for precnode in repo.obsstore.successors:
        precrev = torev(precnode)
        if precrev is not None and phase(repo, precrev):
            result.add(precrev)
    return result
744
747
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obsolete = getrevs(repo, 'obsolete')
    if not obsolete:
        # nothing is obsolete, so nothing can be unstable
        return set()
    descendants = repo.changelog.descendants(obsolete)
    return set(r for r in descendants if r not in obsolete)
755
758
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # an obsolete revision is "suspended" while an unstable descendant
    # keeps it reachable
    ancestors = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(rev for rev in getrevs(repo, 'obsolete') if rev in ancestors)
761
764
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    # extinct = obsolete but not suspended
    return getrevs(repo, 'obsolete').difference(getrevs(repo, 'suspended'))
766
769
767
770
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    # hoist attribute lookups out of the loop
    phaseof = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obsoleted = getrevs(repo, 'obsolete')
    bumped = set()
    for rev in repo:
        # only mutable, non-obsolete revisions can be bumped
        if phaseof(repo, rev) <= public or rev in obsoleted:
            continue
        node = cl.node(rev)
        # (future) A cache of precursors may worth if split is very common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if prev is not None and phaseof(repo, prev) <= public:
                # this revision rewrites a public precursor: bumped
                bumped.add(rev)
                break # Next draft!
    return bumped
791
794
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    # shared `cache` dict reused across all successorssets() calls below
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # markers that record ctx as a successor; each marker is a tuple
        # whose first element is the precursor node (see pop()[0] below)
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            # keep only non-empty successors sets; more than one means the
            # precursor's rewrite history has diverged
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            # walk further back: precursors of the precursor
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
812
815
813
816
def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # fill in default metadata; note that the caller-provided dict is
    # completed in place (historical behavior)
    if metadata is None:
        metadata = {}
    if 'date' not in metadata:
        metadata['date'] = '%i %i' % util.makedate()
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for relation in relations:
            precursor = relation[0]
            successors = relation[1]
            # per-relation metadata overrides the global defaults
            localmetadata = metadata.copy()
            if len(relation) > 2:
                localmetadata.update(relation[2])

            if not precursor.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % precursor)
            nprec = precursor.node()
            nsucs = tuple(s.node() for s in successors)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself"
                                 % precursor)
            repo.obsstore.create(tr, nprec, nsucs, flag, localmetadata)
            # drop filtered-revision caches so they get recomputed
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
General Comments 0
You need to be logged in to leave comments. Login now