obsstore: add an explicit `date` argument to obsstore.create...
Pierre-Yves David
r22217:570f8742 default
@@ -1,876 +1,879 b''
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.


The header is followed by the markers. Each marker is made of:

- 1 unsigned byte: number of new changesets "N", can be zero.

- 1 unsigned 32-bit integer: metadata size "M" in bytes.

- 1 byte: a bit field. It is reserved for flags used in common
  obsolete marker operations, to avoid repeated decoding of metadata
  entries.

- 20 bytes: obsoleted changeset identifier.

- N*20 bytes: new changeset identifiers.

- M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
  additional encoding. Keys cannot contain '\0' or ':' and values
  cannot contain '\0'.

"""
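# Illustrative sketch (not part of this changeset): decoding a single marker
# by hand, following the on-disk format described above. `data` stands for
# the raw bytes of an obsstore file and is a hypothetical placeholder.
#
#     >>> import struct
#     >>> version = struct.unpack('>B', data[0:1])[0]   # version header
#     >>> nbsuc, mdsize, flags, prec = struct.unpack('>BIB20s', data[1:27])
#     >>> sucs = [data[27 + i * 20:47 + i * 20] for i in range(nbsuc)]
#     >>> off = 27 + nbsuc * 20
#     >>> meta = data[off:off + mdsize]                 # '\0'-separated pairs
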
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped version and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
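
# Illustrative sketch (not part of this changeset): flags form a bit field,
# so a fix marker is created and recognised with bitwise operations; `tr`,
# `aprime` and `ad` are hypothetical placeholders.
#
#     >>> repo.obsstore.create(tr, aprime, (ad,), flag=bumpedfix)
#     True
#     >>> any(m[2] & bumpedfix for m in repo.obsstore.successors[aprime])
#     True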

def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)

def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d
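
# Illustrative sketch (not part of this changeset): encodemeta/decodemeta
# round-trip, assuming keys are free of ':' and '\0' and values free of '\0'.
#
#     >>> blob = encodemeta({'user': 'alice', 'date': '0 0'})
#     >>> blob
#     'date:0 0\x00user:alice'
#     >>> decodemeta(blob) == {'user': 'alice', 'date': '0 0'}
#     True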

class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changeset node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        parts = self.metadata()['date'].split(' ')
        return (float(parts[0]), int(parts[1]))

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    """

    def __init__(self, sopener):
        # caches for various obsolescence related computations
        self.caches = {}
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

-   def create(self, transaction, prec, succs=(), flag=0, metadata=None):
+   def create(self, transaction, prec, succs=(), flag=0, date=None,
+              metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code that creates markers, you want to use
        the `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no-op).
        """
        if metadata is None:
            metadata = {}
        if 'date' not in metadata:
-           metadata['date'] = "%d %d" % util.makedate()
+           if date is None:
+               date = util.makedate()
+           metadata['date'] = "%d %d" % date
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        return bool(self.add(transaction, [marker]))
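
    # Illustrative sketch (not part of this changeset): with the new `date`
    # argument a caller can pass an explicit (unixtime, offset) pair instead
    # of pre-encoding it in metadata; `tr`, `old` and `new` are hypothetical.
    #
    #     >>> store.create(tr, old, (new,), date=(1400000000, 0))
    #     True
    #     >>> store.create(tr, old, (new,), date=(1400000000, 0))
    #     False                    # identical marker already exists: no-op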

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering out duplicates.
        Return the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

def _encodemarkers(markers, addheader=False):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for marker in markers:
        yield _encodeonemarker(marker)


def _encodeonemarker(marker):
    pre, sucs, flags, metadata = marker
    nbsuc = len(sucs)
    format = _fmfixed + (_fmnode * nbsuc)
    data = [nbsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

# arbitrarily picked to fit into the 8K limit from the HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
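
# Illustrative sketch (not part of this changeset): each value in the
# returned dict is a self-contained, versioned blob, so every chunk can be
# parsed on its own on the receiving side.
#
#     >>> keys = _pushkeyescape(repo.obsstore)
#     >>> sorted(keys)                         # hypothetical two-chunk store
#     ['dump0', 'dump1']
#     >>> data = base85.b85decode(keys['dump0'])
#     >>> markers = list(_readmarkers(data))   # each chunk parses standalone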

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for markerdata in repo.obsstore:
        yield marker(repo, markerdata)

def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

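# Illustrative sketch (not part of this changeset): if A was rewritten as A'
# and A' was then split into B and C, walking from A yields the initial node
# plus every transitive successor; the node names are hypothetical.
#
#     >>> set(allsuccessors(repo.obsstore, [a])) == set([a, aprime, b, c])
#     True
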
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return all sets of successors of an initial node

    The successors set of a changeset A is a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset
    A (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.

    """
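
    # Illustrative sketch (not part of this changeset): the return shapes
    # described above, with hypothetical nodes.
    #
    #     >>> successorssets(repo, a)        # A rewritten as A'
    #     [(aprime,)]
    #     >>> successorssets(repo, b)        # B pruned
    #     []
    #     >>> successorssets(repo, c)        # C split into D and E
    #     [(d, e)]
    #     >>> successorssets(repo, f)        # F diverged into F' and F''
    #     [(fprime,), (fsecond,)]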

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is the set version of this stack used to check if a
    # node is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node, multiple successors means a
            #   split, a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in successors
                                    # set. first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]

def _knownrevs(repo, nodes):
    """yield revision numbers of known nodes passed in parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return ()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To keep things simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if splits are
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[, {metadata}])
    tuples. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'date' not in metadata:
        metadata['date'] = '%i %i' % util.makedate()
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag,
                                 metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
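
# Illustrative sketch (not part of this changeset): recording that `old` was
# rewritten as `new` (both changectx), merging per-relation metadata over the
# global one; the names and the 'note' key are hypothetical.
#
#     >>> createmarkers(repo, [(old, (new,), {'note': 'amended'})],
#     ...               metadata={'user': 'alice <alice@example.org>'})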