##// END OF EJS Templates
pull: move obsolescence marker exchange in the exchange module...
Pierre-Yves David -
r20476:1180c6ec default
parent child Browse files
Show More
@@ -1,484 +1,506 b''
# exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno
11 import util, scmutil, changegroup
11 import util, scmutil, changegroup, base85
12 12 import discovery, phases, obsolete, bookmarks
13 13
14 14
class pushoperation(object):
    """State container for a single push operation.

    Its purpose is to carry push-related state and very common data so
    the various push helpers can share it.  A fresh instance should be
    created at the beginning of each push and discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # The attributes below start out unset and are filled in as the
        # push pipeline progresses:
        #
        # locallocked - did a local lock get acquired?
        # ret         - integer push result:
        #                 None  -> nothing to push
        #                 0     -> HTTP error
        #                 1     -> pushed and remote head count unchanged,
        #                          *or* outgoing changesets refused
        #                 other -> as described by addchangegroup()
        # outgoing    - discovery.outgoing object (common + outgoing data)
        # remoteheads - all remote heads before the push
        # incoming    - truthy when any nodes are missing locally
        # commonheads - set of all heads common after changeset bundle push
        for attr in ('locallocked', 'ret', 'outgoing',
                     'remoteheads', 'incoming', 'commonheads'):
            setattr(self, attr, None)
def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        # pushing to another local repo: every requirement of the source
        # must be understood by the destination or data could be lost
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop.force, pushop.revs)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # remote cannot receive a bundle: we must lock it and feed
            # it a changegroup directly (addchangegroup path)
            lock = pushop.remote.lock()
        try:
            # push pipeline: discovery, outgoing sanity checks,
            # changegroup transfer, then phase and obsolescence
            # marker synchronisation
            _pushdiscovery(pushop)
            if _pushcheckoutgoing(pushop):
                _pushchangeset(pushop)
            _pushcomputecommonheads(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    # bookmarks are exchanged outside of the locks
    _pushbookmark(pushop)
    return pushop.ret
def _pushdiscovery(pushop):
    """Run the discovery phase and record its results on ``pushop``.

    Fills in ``pushop.outgoing``, ``pushop.remoteheads`` and
    ``pushop.incoming`` for the later push steps.
    """
    unfi = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfi, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(unfi, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
132 132
def _pushcheckoutgoing(pushop):
    """Validate the outgoing set before pushing.

    Returns False when there is nothing to push, True when the push may
    proceed.  Raises util.Abort when the push must be refused (obsolete
    or troubled heads, or new remote heads without --force)."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        # bookmarks configured as pushable may legitimately create
        # new remote heads; pass them along so checkheads allows them
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
171 171
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Builds a changegroup from the outgoing set and transfers it with
    either ``unbundle`` (remote applies under its own lock) or
    ``addchangegroup`` (we hold the remote lock).  The integer result
    is stored in ``pushop.ret``."""
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                            or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = pushop.repo._changegroupsubset(outgoing,
                                            bundler,
                                            'push',
                                            fastpath=True)
    else:
        cg = pushop.repo.getlocalbundle('push', outgoing, bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            'push')
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push',
                                                  pushop.repo.url())
210 210
def _pushcomputecommonheads(pushop):
    """Compute the heads common to both sides after the changeset push.

    The result is stored in ``pushop.commonheads`` and is later used as
    the boundary for phase synchronisation."""
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeed, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All out push fails. synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
244 244
def _pushsyncphase(pushop):
    """Synchronise phase information locally and remotely.

    Uses ``pushop.commonheads`` (computed by _pushcomputecommonheads,
    which push() always runs right before this function) as the
    boundary: phases are pulled from the remote and applied locally,
    then local draft heads are advertised to the remote via pushkey.

    Note: the previous version duplicated the entire common-heads
    computation from _pushcomputecommonheads and overwrote
    pushop.commonheads with the identical value; that dead duplication
    is removed here."""
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only repo
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote by public here.
        # XXX Beware that revset break if droots is not strictly
        # XXX root we may want to ensure it is but it is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
324 324
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if not pushop.locallocked:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        skipped = [n for n in nodes if phase < pushop.repo[n].phase()]
        if skipped:
            name = phases.phasenames[phase]
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % name)
        return
    phases.advanceboundary(pushop.repo, phase, nodes)
338 338
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if not (obsolete._enabled and repo.obsstore):
        return
    if 'obsolete' not in remote.listkeys('namespaces'):
        return
    # note: listkeys() is called on the *local* repo here -- we are
    # serializing our own markers for transmission to the remote
    localdata = repo.listkeys('obsolete')
    results = []
    for key in sorted(localdata, reverse=True):
        # reverse sort to ensure we end with dump0
        results.append(remote.pushkey('obsolete', key, '', localdata[key]))
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
355 355
def _pushbookmark(pushop):
    """Update bookmark position on remote

    Only bookmarks that advanced locally (``advsrc``) are pushed; each
    one is moved on the remote via a ``bookmarks`` pushkey call."""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    # with an explicit rev set, restrict updates to bookmarks whose new
    # target is an ancestor of a pushed rev; with no revs, ``ancestors``
    # is empty and no filtering happens
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)
375 375
class pulloperation(object):
    """State container for a single pull operation.

    Its purpose is to carry pull-related state and very common data.
    A fresh instance should be created at the beginning of each pull
    and discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
394 394
def pull(repo, remote, heads=None, force=False):
    """Pull changesets, phase data and obsolescence markers from remote.

    ``heads`` limits what is pulled (None means "all").  Returns the
    value of addchangegroup() (0 when no changes are found)."""
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        # pulling from a local repo: all its requirements must be
        # supported here or data could be misinterpreted
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # don't open transaction for nothing or you break future useful
    # rollback call
    tr = None
    trname = 'pull\n' + util.hidepassword(pullop.remote.url())
    lock = pullop.repo.lock()
    try:
        tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                           pullop.remote,
                                           heads=pullop.heads,
                                           force=force)
        common, fetch, rheads = tmp
        if not fetch:
            pullop.repo.ui.status(_("no changes found\n"))
            result = 0
        else:
            # we have changesets to fetch: open the transaction now
            tr = pullop.repo.transaction(trname)
            if pullop.heads is None and list(common) == [nullid]:
                pullop.repo.ui.status(_("requesting all changes\n"))
            elif (pullop.heads is None
                  and pullop.remote.capable('changegroupsubset')):
                # issue1320, avoid a race if remote changed after discovery
                pullop.heads = rheads

            # pick the changegroup retrieval method the remote supports
            if pullop.remote.capable('getbundle'):
                # TODO: get bundlecaps from remote
                cg = pullop.remote.getbundle('pull', common=common,
                                             heads=pullop.heads or rheads)
            elif pullop.heads is None:
                cg = pullop.remote.changegroup(fetch, 'pull')
            elif not pullop.remote.capable('changegroupsubset'):
                raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
            else:
                cg = pullop.remote.changegroupsubset(fetch, pullop.heads,
                                                     'pull')
            result = pullop.repo.addchangegroup(cg, 'pull',
                                                pullop.remote.url())

        # compute target subset
        if pullop.heads is None:
            # We pulled every thing possible
            # sync on everything common
            subset = common + rheads
        else:
            # We pulled a specific subset
            # sync on this subset
            subset = pullop.heads

        # Get remote phases data from remote
        remotephases = pullop.remote.listkeys('phases')
        publishing = bool(remotephases.get('publishing', False))
        if remotephases and not publishing:
            # remote is new and unpublishing
            pheads, _dr = phases.analyzeremotephases(pullop.repo, subset,
                                                     remotephases)
            phases.advanceboundary(pullop.repo, phases.public, pheads)
            phases.advanceboundary(pullop.repo, phases.draft, subset)
        else:
            # Remote is old or publishing all common changesets
            # should be seen as public
            phases.advanceboundary(pullop.repo, phases.public, subset)

        def gettransaction():
            # lazily open a transaction so marker-only pulls still get
            # one; the caller re-captures the returned transaction in
            # ``tr`` below (py2 closures cannot rebind ``tr`` here)
            if tr is None:
                return pullop.repo.transaction(trname)
            return tr

        obstr = _pullobsolete(pullop.repo, pullop.remote, gettransaction)
        if obstr is not None:
            tr = obstr

        if tr is not None:
            tr.close()
    finally:
        if tr is not None:
            tr.release()
        lock.release()

    return result
485
def _pullobsolete(repo, remote, gettransaction):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    tr = None
    if not obsolete._enabled:
        return tr
    repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = remote.listkeys('obsolete')
    if 'dump0' not in remoteobs:
        return tr
    tr = gettransaction()
    # reverse sort so dump0 (listed last by listmarkers) is merged last
    for key in sorted(remoteobs, reverse=True):
        if not key.startswith('dump'):
            continue
        repo.obsstore.mergemarkers(tr, base85.b85decode(remoteobs[key]))
    repo.invalidatevolatilesets()
    return tr
506
@@ -1,864 +1,843 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 building new tools to reconciliate conflicting rewriting actions. To
19 19 facilitate conflicts resolution, markers include various annotations
20 20 besides old and news changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called "precursor" and possible replacements are
24 24 called "successors". Markers that used changeset X as a precursors are called
25 25 "successor markers of X" because they hold information about the successors of
26 26 X. Markers that use changeset Y as a successors are call "precursor markers of
27 27 Y" because they hold information about the precursors of Y.
28 28
29 29 Examples:
30 30
- When changeset A is replaced by a changeset A', one marker is stored:
32 32
33 33 (A, (A'))
34 34
35 35 - When changesets A and B are folded into a new changeset C two markers are
36 36 stored:
37 37
38 38 (A, (C,)) and (B, (C,))
39 39
- When changeset A is simply "pruned" from the graph, a marker is created:
41 41
42 42 (A, ())
43 43
44 44 - When changeset A is split into B and C, a single marker are used:
45 45
    (A, (B, C))
47 47
We use a single marker to distinguish the "split" case from the "divergence"
case. If two independent operations rewrite the same changeset A into A' and
A'' we have an error case: divergent rewriting. We can detect it because
51 51 two markers will be created independently:
52 52
53 53 (A, (B,)) and (A, (C,))
54 54
55 55 Format
56 56 ------
57 57
58 58 Markers are stored in an append-only file stored in
59 59 '.hg/store/obsstore'.
60 60
61 61 The file starts with a version header:
62 62
63 63 - 1 unsigned byte: version number, starting at zero.
64 64
65 65
66 66 The header is followed by the markers. Each marker is made of:
67 67
68 68 - 1 unsigned byte: number of new changesets "R", could be zero.
69 69
70 70 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
71 71
72 72 - 1 byte: a bit field. It is reserved for flags used in obsolete
73 73 markers common operations, to avoid repeated decoding of metadata
74 74 entries.
75 75
76 76 - 20 bytes: obsoleted changeset identifier.
77 77
78 78 - N*20 bytes: new changesets identifiers.
79 79
80 80 - M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
82 82 additional encoding. Keys cannot contain '\0' or ':' and values
83 83 cannot contain '\0'.
84 84 """
85 85 import struct
86 86 import util, base85, node
87 87 import phases
88 88 from i18n import _
89 89
90 90 _pack = struct.pack
91 91 _unpack = struct.unpack
92 92
93 93 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
94 94
95 95 # the obsolete feature is not mature enough to be enabled by default.
96 96 # you have to rely on third party extension extension to enable this.
97 97 _enabled = False
98 98
99 99 # data used for parsing and writing
100 100 _fmversion = 0
101 101 _fmfixed = '>BIB20s'
102 102 _fmnode = '20s'
103 103 _fmfsize = struct.calcsize(_fmfixed)
104 104 _fnodesize = struct.calcsize(_fmnode)
105 105
106 106 ### obsolescence marker flag
107 107
108 108 ## bumpedfix flag
109 109 #
110 110 # When a changeset A' succeed to a changeset A which became public, we call A'
111 111 # "bumped" because it's a successors of a public changesets
112 112 #
113 113 # o A' (bumped)
114 114 # |`:
115 115 # | o A
116 116 # |/
117 117 # o Z
118 118 #
119 119 # The way to solve this situation is to create a new changeset Ad as children
120 120 # of A. This changeset have the same content than A'. So the diff from A to A'
121 121 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
122 122 #
123 123 # o Ad
124 124 # |`:
125 125 # | x A'
126 126 # |'|
127 127 # o | A
128 128 # |/
129 129 # o Z
130 130 #
131 131 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
132 132 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
133 133 # This flag mean that the successors express the changes between the public and
134 134 # bumped version and fix the situation, breaking the transitivity of
135 135 # "bumped" here.
136 136 bumpedfix = 1
137 137
def _readmarkers(data):
    """Read and enumerate markers from raw data

    Generator yielding ``(precursor, successors, flags, metadata)``
    tuples decoded from the binary obsstore format described in the
    module docstring.  Aborts on an unknown format version or on
    truncated metadata."""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)
170 170
def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value.
    Entries are emitted in sorted-key order as '<key>:<value>' joined
    by NUL bytes.  Raises ValueError on forbidden characters."""
    # .items() works on both py2 and py3 (the original used the
    # py2-only .iteritems())
    for key, value in meta.items():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key'")
        if '\0' in value:
            # the original message wrongly complained about ':' here,
            # although ':' is legal in values -- only '\0' is forbidden
            raise ValueError("'\\0' is forbidden in metadata value'")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
181 181
def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    result = {}
    for entry in data.split('\0'):
        if not entry:
            continue
        key, value = entry.split(':')
        result[key] = value
    return result
190 190
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # only markers of the same type compare equal, by raw data
        return type(other) == type(self) and self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            # decoded lazily and cached on first access
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        parts = self.metadata()['date'].split(' ')
        return (float(parts[0]), int(parts[1]))
226 226
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    """

    def __init__(self, sopener):
        # caches for various obsolescence related cache
        self.caches = {}
        # all markers, in load order
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        # read any existing on-disk markers; tryread returns empty data
        # when the file does not exist yet
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        # iterate over every known marker
        return iter(self._all)

    def __nonzero__(self):
        # truthy when at least one marker is stored
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        ``prec`` and every entry of ``succs`` must be 20-byte binary
        node identifiers; a creation date is filled in when absent."""
        if metadata is None:
            metadata = {}
        if 'date' not in metadata:
            metadata['date'] = "%d %d" % util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        self.add(transaction, [marker])

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the begin or at
                # the end after opening a file for appending is implementation
                # defined. So we must seek to the end before calling tell(),
                # or we may get a zero offset for non-zero sized files on
                # some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        # decode binary marker data and add it to the store
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        # index markers into the precursors/successors maps
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        # nullid can never be a legitimate successor
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))
322 322
def _encodemarkers(markers, addheader=False):
    """Generate the binary encoding of ``markers``.

    Kept separate from flushmarkers(), it will be reused for
    markers exchange.  When ``addheader`` is true, the format
    version byte is yielded first."""
    if addheader:
        yield _pack('>B', _fmversion)
    for onemarker in markers:
        yield _encodeonemarker(onemarker)
330 330
331 331
def _encodeonemarker(marker):
    """Encode a single marker tuple into its binary form."""
    pre, sucs, flags, metadata = marker
    numsuc = len(sucs)
    # fixed header followed by one node field per successor
    fmt = _fmfixed + (_fmnode * numsuc)
    fields = [numsuc, len(metadata), flags, pre] + list(sucs)
    return _pack(fmt, *fields) + metadata
339 339
340 340 # arbitrary picked to fit into 8K limit from HTTP server
341 341 # you have to take in account:
342 342 # - the version header
343 343 # - the base85 encoding
344 344 _maxpayload = 5300
345 345
def listmarkers(repo):
    """List markers over pushkey

    Serializes all local markers into one or more base85-encoded
    'dumpN' entries, each kept under _maxpayload bytes so a pushkey
    value fits in the HTTP server's 8K header limit."""
    if not repo.obsstore:
        return {}
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in repo.obsstore:
        nextdata = _encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            # start a new part once the current one would overflow
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    # number parts in reverse so 'dump0' holds the most recent markers
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
365 365
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    ``key`` must be of the form ``dumpN``; ``new`` is base85-encoded
    binary marker data that is merged into the local obsstore inside a
    transaction, under the repo lock. ``old`` must be empty (markers are
    append-only). Returns 1 on success, 0 on a rejected request, per the
    pushkey protocol convention.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        # bugfix: the original message had no %s placeholder, so the
        # '% key' formatting raised TypeError instead of warning
        repo.ui.warn(_('unexpected old value for %s') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()
386 386
def syncpull(repo, remote, gettransaction):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a callable returning the pull transaction,
    creating one if necessary. The transaction is returned so the caller
    can tell that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if not _enabled:
        return None
    repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = remote.listkeys('obsolete')
    if 'dump0' not in remoteobs:
        return None
    tr = gettransaction()
    # merge every 'dumpN' part, iterating keys in reverse sorted order
    for key in sorted(remoteobs, reverse=True):
        if key.startswith('dump'):
            repo.obsstore.mergemarkers(tr, base85.b85decode(remoteobs[key]))
    repo.invalidatevolatilesets()
    return tr
407
def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for rawdata in repo.obsstore:
        yield marker(repo, rawdata)
412 391
def precursormarkers(ctx):
    """yield the markers in which this changeset appears as a successor

    (i.e. the markers recording the precursors of ``ctx``)"""
    repo = ctx._repo
    for rawdata in repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(repo, rawdata)
417 396
def successormarkers(ctx):
    """yield the markers making this changeset obsolete

    (i.e. the markers in which ``ctx`` appears as the precursor)"""
    repo = ctx._repo
    for rawdata in repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(repo, rawdata)
422 401
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for succ in mark[1]:
                if succ not in emitted:
                    emitted.add(succ)
                    pending.add(succ)
443 422
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.precursors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            prec = mark[0]
            if prec not in emitted:
                emitted.add(prec)
                pending.add(prec)
465 444
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycles may give surprising results
    in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # Fixed-point iteration: alternately extend the set with
        # descendants (the '%ln::' revset) and with obsolescence
        # successors of its mutable members, until it stops growing.
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # only nodes known locally can seed the next revset query
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
491 470
492 471
def successorssets(repo, initialnode, cache=None):
    """Return all set of successors of initial nodes

    The successors set of a changeset A are a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performances.

    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in successors
                                    # set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
698 677
699 678 def _knownrevs(repo, nodes):
700 679 """yield revision numbers of known nodes passed in parameters
701 680
702 681 Unknown revisions are silently ignored."""
703 682 torev = repo.changelog.nodemap.get
704 683 for n in nodes:
705 684 rev = torev(n)
706 685 if rev is not None:
707 686 yield rev
708 687
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}

def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def register(func):
        # a given set name may only be registered once
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return register
718 697
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return ()
    caches = repo.obsstore.caches
    if name not in caches:
        caches[name] = cachefuncs[name](repo)
    return caches[name]
729 708
# Obsolescence caches must be invalidated whenever:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This clears all caches in the obsstore, but only if the obsstore
    already exists on the repo.

    (We could be smarter here given the exact event that triggered the
    cache clearing)"""
    # touching repo.obsstore would create it; check the filecache instead
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
747 726
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    torev = repo.changelog.nodemap.get
    phase = repo._phasecache.phase
    obsolete = set()
    # a revision is obsolete when its node is used as a precursor in a
    # marker, is known locally, and is not public (phase > 0)
    for prec in repo.obsstore.successors:
        rev = torev(prec)
        if rev is not None and phase(repo, rev):
            obsolete.add(rev)
    return obsolete
759 738
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obsoleterevs = getrevs(repo, 'obsolete')
    if not obsoleterevs:
        return set()
    descendants = repo.changelog.descendants(obsoleterevs)
    return set(r for r in descendants if r not in obsoleterevs)
770 749
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # an obsolete revision is "suspended" if it is an ancestor of an
    # unstable one
    unstableancestors = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(rev for rev in getrevs(repo, 'obsolete')
               if rev in unstableancestors)
776 755
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    # extinct = obsolete revisions that are not suspended
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
781 760
782 761
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions

    A revision is "bumped" when it is mutable, not obsolete, and at least
    one of its precursors (walked while ignoring markers flagged with
    ``bumpedfix``) is public.
    """
    bumped = set()
    # util functions (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revision
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if split is
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped
806 785
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    # newermap caches successors sets across all iterations (it is both
    # read and filled by successorssets)
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        # walk the transitive precursors of ctx; if any of them has more
        # than one non-empty successors set, ctx competes with another
        # changeset and is therefore divergent
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
827 806
828 807
def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)) tuple.
    `old` and `news` are changectx.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata, defaulting to the current date and user
    if metadata is None:
        metadata = {}
    if 'date' not in metadata:
        metadata['date'] = '%i %i' % util.makedate()
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for precursor, successors in relations:
            if not precursor.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % precursor)
            nprec = precursor.node()
            nsucs = tuple(s.node() for s in successors)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself"
                                 % precursor)
            repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
            # new markers may affect which revisions are filtered out;
            # drop the cached filtered sets
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
General Comments 0
You need to be logged in to leave comments. Login now