obsolete: add a high level function to create an obsolete marker...
Pierre-Yves David
r17474:f85816af default
@@ -1,397 +1,433 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

9 """Obsolete markers handling
9 """Obsolete markers handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewriting operations, and help
17 transformations performed by history rewriting operations, and help
18 building new tools to reconciliate conflicting rewriting actions. To
18 building new tools to reconciliate conflicting rewriting actions. To
19 facilitate conflicts resolution, markers include various annotations
19 facilitate conflicts resolution, markers include various annotations
20 besides old and news changeset identifiers, such as creation date or
20 besides old and news changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23
23
24 Format
24 Format
25 ------
25 ------
26
26
27 Markers are stored in an append-only file stored in
27 Markers are stored in an append-only file stored in
28 '.hg/store/obsstore'.
28 '.hg/store/obsstore'.
29
29
30 The file starts with a version header:
30 The file starts with a version header:
31
31
32 - 1 unsigned byte: version number, starting at zero.
32 - 1 unsigned byte: version number, starting at zero.
33
33
34
34
35 The header is followed by the markers. Each marker is made of:
35 The header is followed by the markers. Each marker is made of:
36
36
37 - 1 unsigned byte: number of new changesets "R", could be zero.
37 - 1 unsigned byte: number of new changesets "R", could be zero.
38
38
39 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
39 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
40
40
41 - 1 byte: a bit field. It is reserved for flags used in obsolete
41 - 1 byte: a bit field. It is reserved for flags used in obsolete
42 markers common operations, to avoid repeated decoding of metadata
42 markers common operations, to avoid repeated decoding of metadata
43 entries.
43 entries.
44
44
45 - 20 bytes: obsoleted changeset identifier.
45 - 20 bytes: obsoleted changeset identifier.
46
46
47 - N*20 bytes: new changesets identifiers.
47 - N*20 bytes: new changesets identifiers.
48
48
49 - M bytes: metadata as a sequence of nul-terminated strings. Each
49 - M bytes: metadata as a sequence of nul-terminated strings. Each
50 string contains a key and a value, separated by a color ':', without
50 string contains a key and a value, separated by a color ':', without
51 additional encoding. Keys cannot contain '\0' or ':' and values
51 additional encoding. Keys cannot contain '\0' or ':' and values
52 cannot contain '\0'.
52 cannot contain '\0'.
53 """
53 """
import struct
import util, base85
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)

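# Illustrative size arithmetic (a sketch, not used by the code): the fixed
# part '>BIB20s' packs to 1 + 4 + 1 + 20 = 26 bytes (_fmfsize), each
# successor node appends another _fnodesize = 20 bytes, and the raw
# metadata blob follows.
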
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)

def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # values may contain ':', so split on the first colon only
            key, value = l.split(':', 1)
            d[key] = value
    return d

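# Example round-trip (illustration only, not part of the module API):
#
#   blob = encodemeta({'user': 'alice', 'date': '0 0'})
#   assert decodemeta(blob) == {'user': 'alice', 'date': '0 0'}
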
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later versions
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changeset node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        parts = self.metadata()['date'].split(' ')
        return (float(parts[0]), int(parts[1]))
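
# Illustration (assumes a repository whose obsstore holds at least one
# marker): the wrapper gives lazy access to the decoded fields, e.g.
#
#   m = allmarkers(repo).next()
#   m.precnode(), m.succnodes(), m.metadata().get('user')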

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors: old node -> set(markers referencing it as precursor)
    - successors: new node -> set(markers referencing it as successor)
    """

    def __init__(self, sopener):
        # caches for various obsolescence related sets
        self.caches = {}
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        return iter(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """add a new obsolete marker

        * ensure the marker is hashable
        * check mandatory metadata
        * encode metadata
        """
        if metadata is None:
            metadata = {}
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        self.add(transaction, [marker])
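
    # Typical call (illustration only; requires _enabled to be set and an
    # open transaction `tr`):
    #   store.create(tr, precnode, (succnode,), metadata={'user': 'alice'})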

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering out duplicates.
        Returns the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        new = [m for m in markers if m not in self._all]
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers into the store"""
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.precursors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.successors.setdefault(suc, set()).add(mark)

def _encodemarkers(markers, addheader=False):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for marker in markers:
        yield _encodeonemarker(marker)


def _encodeonemarker(marker):
    pre, sucs, flags, metadata = marker
    nbsuc = len(sucs)
    format = _fmfixed + (_fmnode * nbsuc)
    data = [nbsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

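# A minimal round-trip sketch (illustration only): encoding a marker and
# feeding the bytes back through _readmarkers, header included, yields the
# original tuple.
#
#   m = ('\x11' * 20, ('\x22' * 20,), 0, encodemeta({'user': 'bob'}))
#   data = ''.join(_encodemarkers([m], addheader=True))
#   assert list(_readmarkers(data)) == [m]
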
# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in repo.obsstore:
        nextdata = _encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
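
# Each value returned by listmarkers() is a standalone obsstore payload:
# base85-decoding it yields a version header followed by whole markers, so
# the receiving side can hand it directly to obsstore.mergemarkers(), as
# pushmarker() below does.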

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for markerdata in repo.obsstore:
        yield marker(repo, markerdata)

def precursormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def anysuccessors(obsstore, node):
    """Yield every successor of <node>

    This is a linear yield, unsuitable for detecting split changesets."""
    remaining = set([node])
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
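
# Example traversal (illustration only): with markers A -> (B,) and
# B -> (C,) in the store, anysuccessors(obsstore, A) yields A itself first,
# then B and C, visiting each node at most once.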

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getobscache(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    if not repo.obsstore:
        return ()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear cache if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    nm = repo.changelog.nodemap
    for prec in repo.obsstore.precursors:
        rev = nm.get(prec)
        if rev is not None:
            obs.add(rev)
    return set(repo.revs('%ld - public()', obs))

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete ancestors"""
    return set(repo.revs('(obsolete()::) - obsolete()'))

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete changesets with non-obsolete descendants"""
    return set(repo.revs('obsolete() and obsolete()::unstable()'))

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete changesets without non-obsolete descendants"""
    return set(repo.revs('obsolete() - obsolete()::unstable()'))

def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
    `old` and `news` are changectx objects.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'date' not in metadata:
        metadata['date'] = '%i %i' % util.makedate()
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for prec, sucs in relations:
            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
        tr.close()
    finally:
        tr.release()
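
A minimal usage sketch for the new helper (illustration only; it assumes a
loaded `repo` object and that the obsolete feature has been switched on,
e.g. by a third party extension setting `obsolete._enabled = True`):

    from mercurial import obsolete

    # replace the working directory parent with another changeset;
    # both operands are changectx objects looked up on the repository
    old = repo['.']
    new = repo['tip']
    obsolete.createmarkers(repo, [(old, (new,))])

    # "killing" a changeset is the same call with an empty successor tuple
    obsolete.createmarkers(repo, [(old, ())])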