obsolete: sort obsmarkers during exchange...
Pierre-Yves David
r25118:e632a242 default
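The change itself is small: each of the three places in exchange.py that emits obsolescence markers during exchange (the bundle2 push part, the pushkey fallback, and the getbundle part) now passes the marker set through sorted() before encoding, making the generated payloads deterministic. Markers are plain tuples, so sorted() orders them lexicographically, precursor node first. A minimal sketch of the effect, with hypothetical marker values:

# Sketch only; marker tuples follow the
# (precursor, successors, flags, metadata, date, parents) layout.
m1 = ('\x02' * 20, ('\x03' * 20,), 0, (), (0.0, 0), None)
m2 = ('\x01' * 20, (), 0, (), (0.0, 0), None)
markers = set([m1, m2])              # obsstore queries return unordered sets
assert sorted(markers) == [m2, m1]   # stable, precursor-first wire order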
@@ -1,1332 +1,1334 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13 import lock as lockmod
14 14
15 15 def readbundle(ui, fh, fname, vfs=None):
16 16 header = changegroup.readexactly(fh, 4)
17 17
18 18 alg = None
19 19 if not fname:
20 20 fname = "stream"
21 21 if not header.startswith('HG') and header.startswith('\0'):
22 22 fh = changegroup.headerlessfixup(fh, header)
23 23 header = "HG10"
24 24 alg = 'UN'
25 25 elif vfs:
26 26 fname = vfs.join(fname)
27 27
28 28 magic, version = header[0:2], header[2:4]
29 29
30 30 if magic != 'HG':
31 31 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
32 32 if version == '10':
33 33 if alg is None:
34 34 alg = changegroup.readexactly(fh, 2)
35 35 return changegroup.cg1unpacker(fh, alg)
36 36 elif version.startswith('2'):
37 37 return bundle2.getunbundler(ui, fh, header=magic + version)
38 38 else:
39 39 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
40 40
41 41 def buildobsmarkerspart(bundler, markers):
42 42 """add an obsmarker part to the bundler with <markers>
43 43
44 44 No part is created if markers is empty.
45 45 Raises ValueError if the bundler doesn't support any known obsmarker format.
46 46 """
47 47 if markers:
48 48 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
49 49 version = obsolete.commonversion(remoteversions)
50 50 if version is None:
51 51 raise ValueError('bundler does not support common obsmarker format')
52 52 stream = obsolete.encodemarkers(markers, True, version=version)
53 53 return bundler.newpart('obsmarkers', data=stream)
54 54 return None
55 55
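buildobsmarkerspart negotiates the marker format: obsolete.commonversion() picks the newest format both sides understand, and a None result triggers the ValueError above. A standalone sketch of that selection, assuming the local formats are passed in explicitly:

def commonversion_sketch(localversions, remoteversions):
    # newest obsmarker format known to both ends, None if disjoint
    common = set(localversions) & set(remoteversions)
    return max(common) if common else None

assert commonversion_sketch((0, 1), (1, 0)) == 1
assert commonversion_sketch((0, 1), (2,)) is None   # -> ValueError above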
56 56 def _canusebundle2(op):
57 57 """return true if a pull/push can use bundle2
58 58
59 59 Feel free to nuke this function when we drop the experimental option"""
60 60 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
61 61 and op.remote.capable('bundle2'))
62 62
63 63
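The experimental knob read by _canusebundle2 above lives in the user's configuration; until the option is dropped, bundle2 exchange is enabled with an hgrc entry such as:

[experimental]
bundle2-exp = True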
64 64 class pushoperation(object):
65 65 """An object that represents a single push operation
66 66
67 67 Its purpose is to carry push-related state and common operations.
68 68
69 69 A new one should be created at the beginning of each push and discarded
70 70 afterward.
71 71 """
72 72
73 73 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
74 74 bookmarks=()):
75 75 # repo we push from
76 76 self.repo = repo
77 77 self.ui = repo.ui
78 78 # repo we push to
79 79 self.remote = remote
80 80 # force option provided
81 81 self.force = force
82 82 # revs to be pushed (None is "all")
83 83 self.revs = revs
84 84 # bookmarks explicitly pushed
85 85 self.bookmarks = bookmarks
86 86 # allow push of new branch
87 87 self.newbranch = newbranch
88 88 # did a local lock get acquired?
89 89 self.locallocked = None
90 90 # steps already performed
91 91 # (used to check which steps have already been performed through bundle2)
92 92 self.stepsdone = set()
93 93 # Integer version of the changegroup push result
94 94 # - None means nothing to push
95 95 # - 0 means HTTP error
96 96 # - 1 means we pushed and remote head count is unchanged *or*
97 97 # we have outgoing changesets but refused to push
98 98 # - other values as described by addchangegroup()
99 99 self.cgresult = None
100 100 # Boolean value for the bookmark push
101 101 self.bkresult = None
102 102 # discovery.outgoing object (contains common and outgoing data)
103 103 self.outgoing = None
104 104 # all remote heads before the push
105 105 self.remoteheads = None
106 106 # testable as a boolean indicating if any nodes are missing locally.
107 107 self.incoming = None
108 108 # phase changes that must be pushed alongside the changesets
109 109 self.outdatedphases = None
110 110 # phase changes that must be pushed if the changeset push fails
111 111 self.fallbackoutdatedphases = None
112 112 # outgoing obsmarkers
113 113 self.outobsmarkers = set()
114 114 # outgoing bookmarks
115 115 self.outbookmarks = []
116 116 # transaction manager
117 117 self.trmanager = None
118 118
119 119 @util.propertycache
120 120 def futureheads(self):
121 121 """future remote heads if the changeset push succeeds"""
122 122 return self.outgoing.missingheads
123 123
124 124 @util.propertycache
125 125 def fallbackheads(self):
126 126 """future remote heads if the changeset push fails"""
127 127 if self.revs is None:
128 128 # no targets to push, all common heads are relevant
129 129 return self.outgoing.commonheads
130 130 unfi = self.repo.unfiltered()
131 131 # I want cheads = heads(::missingheads and ::commonheads)
132 132 # (missingheads is revs with secret changeset filtered out)
133 133 #
134 134 # This can be expressed as:
135 135 # cheads = ( (missingheads and ::commonheads)
136 136 # + (commonheads and ::missingheads)
137 137 # )
138 138 #
139 139 # while trying to push we already computed the following:
140 140 # common = (::commonheads)
141 141 # missing = ((commonheads::missingheads) - commonheads)
142 142 #
143 143 # We can pick:
144 144 # * missingheads part of common (::commonheads)
145 145 common = set(self.outgoing.common)
146 146 nm = self.repo.changelog.nodemap
147 147 cheads = [node for node in self.revs if nm[node] in common]
148 148 # and
149 149 # * commonheads parents on missing
150 150 revset = unfi.set('%ln and parents(roots(%ln))',
151 151 self.outgoing.commonheads,
152 152 self.outgoing.missing)
153 153 cheads.extend(c.node() for c in revset)
154 154 return cheads
155 155
156 156 @property
157 157 def commonheads(self):
158 158 """set of all common heads after changeset bundle push"""
159 159 if self.cgresult:
160 160 return self.futureheads
161 161 else:
162 162 return self.fallbackheads
163 163
164 164 # mapping of messages used when pushing bookmarks
165 165 bookmsgmap = {'update': (_("updating bookmark %s\n"),
166 166 _('updating bookmark %s failed!\n')),
167 167 'export': (_("exporting bookmark %s\n"),
168 168 _('exporting bookmark %s failed!\n')),
169 169 'delete': (_("deleting remote bookmark %s\n"),
170 170 _('deleting remote bookmark %s failed!\n')),
171 171 }
172 172
173 173
174 174 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
175 175 '''Push outgoing changesets (limited by revs) from a local
176 176 repository to remote. Return an integer:
177 177 - None means nothing to push
178 178 - 0 means HTTP error
179 179 - 1 means we pushed and remote head count is unchanged *or*
180 180 we have outgoing changesets but refused to push
181 181 - other values as described by addchangegroup()
182 182 '''
183 183 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
184 184 if pushop.remote.local():
185 185 missing = (set(pushop.repo.requirements)
186 186 - pushop.remote.local().supported)
187 187 if missing:
188 188 msg = _("required features are not"
189 189 " supported in the destination:"
190 190 " %s") % (', '.join(sorted(missing)))
191 191 raise util.Abort(msg)
192 192
193 193 # there are two ways to push to remote repo:
194 194 #
195 195 # addchangegroup assumes local user can lock remote
196 196 # repo (local filesystem, old ssh servers).
197 197 #
198 198 # unbundle assumes local user cannot lock remote repo (new ssh
199 199 # servers, http servers).
200 200
201 201 if not pushop.remote.canpush():
202 202 raise util.Abort(_("destination does not support push"))
203 203 # get local lock as we might write phase data
204 204 localwlock = locallock = None
205 205 try:
206 206 # bundle2 push may receive a reply bundle touching bookmarks or other
207 207 # things requiring the wlock. Take it now to ensure proper ordering.
208 208 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
209 209 if _canusebundle2(pushop) and maypushback:
210 210 localwlock = pushop.repo.wlock()
211 211 locallock = pushop.repo.lock()
212 212 pushop.locallocked = True
213 213 except IOError, err:
214 214 pushop.locallocked = False
215 215 if err.errno != errno.EACCES:
216 216 raise
217 217 # source repo cannot be locked.
218 218 # We do not abort the push, but just disable the local phase
219 219 # synchronisation.
220 220 msg = 'cannot lock source repository: %s\n' % err
221 221 pushop.ui.debug(msg)
222 222 try:
223 223 if pushop.locallocked:
224 224 pushop.trmanager = transactionmanager(repo,
225 225 'push-response',
226 226 pushop.remote.url())
227 227 pushop.repo.checkpush(pushop)
228 228 lock = None
229 229 unbundle = pushop.remote.capable('unbundle')
230 230 if not unbundle:
231 231 lock = pushop.remote.lock()
232 232 try:
233 233 _pushdiscovery(pushop)
234 234 if _canusebundle2(pushop):
235 235 _pushbundle2(pushop)
236 236 _pushchangeset(pushop)
237 237 _pushsyncphase(pushop)
238 238 _pushobsolete(pushop)
239 239 _pushbookmark(pushop)
240 240 finally:
241 241 if lock is not None:
242 242 lock.release()
243 243 if pushop.trmanager:
244 244 pushop.trmanager.close()
245 245 finally:
246 246 if pushop.trmanager:
247 247 pushop.trmanager.release()
248 248 if locallock is not None:
249 249 locallock.release()
250 250 if localwlock is not None:
251 251 localwlock.release()
252 252
253 253 return pushop
254 254
255 255 # list of steps to perform discovery before push
256 256 pushdiscoveryorder = []
257 257
258 258 # Mapping between step name and function
259 259 #
260 260 # This exists to help extensions wrap steps if necessary
261 261 pushdiscoverymapping = {}
262 262
263 263 def pushdiscovery(stepname):
264 264 """decorator for function performing discovery before push
265 265
266 266 The function is added to the step -> function mapping and appended to the
267 267 list of steps. Beware that decorated functions will be added in order (this
268 268 may matter).
269 269
270 270 You can only use this decorator for a new step; if you want to wrap a step
271 271 from an extension, change the pushdiscovery dictionary directly."""
272 272 def dec(func):
273 273 assert stepname not in pushdiscoverymapping
274 274 pushdiscoverymapping[stepname] = func
275 275 pushdiscoveryorder.append(stepname)
276 276 return func
277 277 return dec
278 278
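A hypothetical extension-defined step illustrates the registration pattern: the decorator records the function in pushdiscoverymapping and appends its name to pushdiscoveryorder.

# Hypothetical extension code, not part of core:
@pushdiscovery('example')
def _pushdiscoveryexample(pushop):
    pushop.ui.debug('example discovery step\n')

assert pushdiscoveryorder[-1] == 'example'
assert pushdiscoverymapping['example'] is _pushdiscoveryexample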
279 279 def _pushdiscovery(pushop):
280 280 """Run all discovery steps"""
281 281 for stepname in pushdiscoveryorder:
282 282 step = pushdiscoverymapping[stepname]
283 283 step(pushop)
284 284
285 285 @pushdiscovery('changeset')
286 286 def _pushdiscoverychangeset(pushop):
287 287 """discover the changesets that need to be pushed"""
288 288 fci = discovery.findcommonincoming
289 289 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
290 290 common, inc, remoteheads = commoninc
291 291 fco = discovery.findcommonoutgoing
292 292 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
293 293 commoninc=commoninc, force=pushop.force)
294 294 pushop.outgoing = outgoing
295 295 pushop.remoteheads = remoteheads
296 296 pushop.incoming = inc
297 297
298 298 @pushdiscovery('phase')
299 299 def _pushdiscoveryphase(pushop):
300 300 """discover the phases that need to be pushed
301 301
302 302 (computed for both success and failure case for changesets push)"""
303 303 outgoing = pushop.outgoing
304 304 unfi = pushop.repo.unfiltered()
305 305 remotephases = pushop.remote.listkeys('phases')
306 306 publishing = remotephases.get('publishing', False)
307 307 ana = phases.analyzeremotephases(pushop.repo,
308 308 pushop.fallbackheads,
309 309 remotephases)
310 310 pheads, droots = ana
311 311 extracond = ''
312 312 if not publishing:
313 313 extracond = ' and public()'
314 314 revset = 'heads((%%ln::%%ln) %s)' % extracond
315 315 # Get the list of all revs that are draft on the remote but public here.
316 316 # XXX Beware that the revset breaks if droots is not strictly
317 317 # XXX roots; we may want to ensure it is, but that is costly
318 318 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
319 319 if not outgoing.missing:
320 320 future = fallback
321 321 else:
322 322 # adds changeset we are going to push as draft
323 323 #
324 324 # should not be necessary for a publishing server, but because of an
325 325 # issue fixed in xxxxx we have to do it anyway.
326 326 fdroots = list(unfi.set('roots(%ln + %ln::)',
327 327 outgoing.missing, droots))
328 328 fdroots = [f.node() for f in fdroots]
329 329 future = list(unfi.set(revset, fdroots, pushop.futureheads))
330 330 pushop.outdatedphases = future
331 331 pushop.fallbackoutdatedphases = fallback
332 332
333 333 @pushdiscovery('obsmarker')
334 334 def _pushdiscoveryobsmarkers(pushop):
335 335 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
336 336 and pushop.repo.obsstore
337 337 and 'obsolete' in pushop.remote.listkeys('namespaces')):
338 338 repo = pushop.repo
339 339 # very naive computation that can be quite expensive on a big repo.
340 340 # However, evolution is currently slow on them anyway.
341 341 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
342 342 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
343 343
344 344 @pushdiscovery('bookmarks')
345 345 def _pushdiscoverybookmarks(pushop):
346 346 ui = pushop.ui
347 347 repo = pushop.repo.unfiltered()
348 348 remote = pushop.remote
349 349 ui.debug("checking for updated bookmarks\n")
350 350 ancestors = ()
351 351 if pushop.revs:
352 352 revnums = map(repo.changelog.rev, pushop.revs)
353 353 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
354 354 remotebookmark = remote.listkeys('bookmarks')
355 355
356 356 explicit = set(pushop.bookmarks)
357 357
358 358 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
359 359 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
360 360 for b, scid, dcid in advsrc:
361 361 if b in explicit:
362 362 explicit.remove(b)
363 363 if not ancestors or repo[scid].rev() in ancestors:
364 364 pushop.outbookmarks.append((b, dcid, scid))
365 365 # search added bookmark
366 366 for b, scid, dcid in addsrc:
367 367 if b in explicit:
368 368 explicit.remove(b)
369 369 pushop.outbookmarks.append((b, '', scid))
370 370 # search for overwritten bookmark
371 371 for b, scid, dcid in advdst + diverge + differ:
372 372 if b in explicit:
373 373 explicit.remove(b)
374 374 pushop.outbookmarks.append((b, dcid, scid))
375 375 # search for bookmark to delete
376 376 for b, scid, dcid in adddst:
377 377 if b in explicit:
378 378 explicit.remove(b)
379 379 # treat as "deleted locally"
380 380 pushop.outbookmarks.append((b, dcid, ''))
381 381 # identical bookmarks shouldn't get reported
382 382 for b, scid, dcid in same:
383 383 if b in explicit:
384 384 explicit.remove(b)
385 385
386 386 if explicit:
387 387 explicit = sorted(explicit)
388 388 # we should probably list all of them
389 389 ui.warn(_('bookmark %s does not exist on the local '
390 390 'or remote repository!\n') % explicit[0])
391 391 pushop.bkresult = 2
392 392
393 393 pushop.outbookmarks.sort()
394 394
395 395 def _pushcheckoutgoing(pushop):
396 396 outgoing = pushop.outgoing
397 397 unfi = pushop.repo.unfiltered()
398 398 if not outgoing.missing:
399 399 # nothing to push
400 400 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
401 401 return False
402 402 # something to push
403 403 if not pushop.force:
404 404 # if repo.obsstore is empty --> no obsolete changesets anywhere,
405 405 # so we can skip the iteration
406 406 if unfi.obsstore:
407 407 # these messages are here for 80 char limit reasons
408 408 mso = _("push includes obsolete changeset: %s!")
409 409 mst = {"unstable": _("push includes unstable changeset: %s!"),
410 410 "bumped": _("push includes bumped changeset: %s!"),
411 411 "divergent": _("push includes divergent changeset: %s!")}
412 412 # If we are about to push and there is at least one
413 413 # obsolete or unstable changeset in missing, at
414 414 # least one of the missingheads will be obsolete or
415 415 # unstable. So checking heads only is ok
416 416 for node in outgoing.missingheads:
417 417 ctx = unfi[node]
418 418 if ctx.obsolete():
419 419 raise util.Abort(mso % ctx)
420 420 elif ctx.troubled():
421 421 raise util.Abort(mst[ctx.troubles()[0]] % ctx)
422 422 newbm = pushop.ui.configlist('bookmarks', 'pushing')
423 423 discovery.checkheads(unfi, pushop.remote, outgoing,
424 424 pushop.remoteheads,
425 425 pushop.newbranch,
426 426 bool(pushop.incoming),
427 427 newbm)
428 428 return True
429 429
430 430 # List of names of steps to perform for an outgoing bundle2, order matters.
431 431 b2partsgenorder = []
432 432
433 433 # Mapping between step name and function
434 434 #
435 435 # This exists to help extensions wrap steps if necessary
436 436 b2partsgenmapping = {}
437 437
438 438 def b2partsgenerator(stepname, idx=None):
439 439 """decorator for function generating bundle2 part
440 440
441 441 The function is added to the step -> function mapping and appended to the
442 442 list of steps. Beware that decorated functions will be added in order
443 443 (this may matter).
444 444
445 445 You can only use this decorator for new steps; if you want to wrap a step
446 446 from an extension, change the b2partsgenmapping dictionary directly."""
447 447 def dec(func):
448 448 assert stepname not in b2partsgenmapping
449 449 b2partsgenmapping[stepname] = func
450 450 if idx is None:
451 451 b2partsgenorder.append(stepname)
452 452 else:
453 453 b2partsgenorder.insert(idx, stepname)
454 454 return func
455 455 return dec
456 456
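Unlike the discovery decorator, this one takes an optional idx, since part order inside a bundle2 matters. A hypothetical generator pinned to the front of the order:

# Hypothetical extension code, not part of core:
@b2partsgenerator('example', idx=0)
def _pushb2example(pushop, bundler):
    if 'example' in pushop.stepsdone:
        return
    pushop.stepsdone.add('example')

assert b2partsgenorder[0] == 'example'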
457 457 @b2partsgenerator('changeset')
458 458 def _pushb2ctx(pushop, bundler):
459 459 """handle changegroup push through bundle2
460 460
461 461 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
462 462 """
463 463 if 'changesets' in pushop.stepsdone:
464 464 return
465 465 pushop.stepsdone.add('changesets')
466 466 # Send known heads to the server for race detection.
467 467 if not _pushcheckoutgoing(pushop):
468 468 return
469 469 pushop.repo.prepushoutgoinghooks(pushop.repo,
470 470 pushop.remote,
471 471 pushop.outgoing)
472 472 if not pushop.force:
473 473 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
474 474 b2caps = bundle2.bundle2caps(pushop.remote)
475 475 version = None
476 476 cgversions = b2caps.get('changegroup')
477 477 if not cgversions: # 3.1 and 3.2 ship with an empty value
478 478 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
479 479 pushop.outgoing)
480 480 else:
481 481 cgversions = [v for v in cgversions if v in changegroup.packermap]
482 482 if not cgversions:
483 483 raise ValueError(_('no common changegroup version'))
484 484 version = max(cgversions)
485 485 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
486 486 pushop.outgoing,
487 487 version=version)
488 488 cgpart = bundler.newpart('changegroup', data=cg)
489 489 if version is not None:
490 490 cgpart.addparam('version', version)
491 491 def handlereply(op):
492 492 """extract addchangegroup returns from server reply"""
493 493 cgreplies = op.records.getreplies(cgpart.id)
494 494 assert len(cgreplies['changegroup']) == 1
495 495 pushop.cgresult = cgreplies['changegroup'][0]['return']
496 496 return handlereply
497 497
498 498 @b2partsgenerator('phase')
499 499 def _pushb2phases(pushop, bundler):
500 500 """handle phase push through bundle2"""
501 501 if 'phases' in pushop.stepsdone:
502 502 return
503 503 b2caps = bundle2.bundle2caps(pushop.remote)
504 504 if not 'pushkey' in b2caps:
505 505 return
506 506 pushop.stepsdone.add('phases')
507 507 part2node = []
508 508 enc = pushkey.encode
509 509 for newremotehead in pushop.outdatedphases:
510 510 part = bundler.newpart('pushkey')
511 511 part.addparam('namespace', enc('phases'))
512 512 part.addparam('key', enc(newremotehead.hex()))
513 513 part.addparam('old', enc(str(phases.draft)))
514 514 part.addparam('new', enc(str(phases.public)))
515 515 part2node.append((part.id, newremotehead))
516 516 def handlereply(op):
517 517 for partid, node in part2node:
518 518 partrep = op.records.getreplies(partid)
519 519 results = partrep['pushkey']
520 520 assert len(results) <= 1
521 521 msg = None
522 522 if not results:
523 523 msg = _('server ignored update of %s to public!\n') % node
524 524 elif not int(results[0]['return']):
525 525 msg = _('updating %s to public failed!\n') % node
526 526 if msg is not None:
527 527 pushop.ui.warn(msg)
528 528 return handlereply
529 529
530 530 @b2partsgenerator('obsmarkers')
531 531 def _pushb2obsmarkers(pushop, bundler):
532 532 if 'obsmarkers' in pushop.stepsdone:
533 533 return
534 534 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
535 535 if obsolete.commonversion(remoteversions) is None:
536 536 return
537 537 pushop.stepsdone.add('obsmarkers')
538 538 if pushop.outobsmarkers:
539 buildobsmarkerspart(bundler, pushop.outobsmarkers)
539 markers = sorted(pushop.outobsmarkers)
540 buildobsmarkerspart(bundler, markers)
540 541
541 542 @b2partsgenerator('bookmarks')
542 543 def _pushb2bookmarks(pushop, bundler):
543 544 """handle bookmark push through bundle2"""
544 545 if 'bookmarks' in pushop.stepsdone:
545 546 return
546 547 b2caps = bundle2.bundle2caps(pushop.remote)
547 548 if 'pushkey' not in b2caps:
548 549 return
549 550 pushop.stepsdone.add('bookmarks')
550 551 part2book = []
551 552 enc = pushkey.encode
552 553 for book, old, new in pushop.outbookmarks:
553 554 part = bundler.newpart('pushkey')
554 555 part.addparam('namespace', enc('bookmarks'))
555 556 part.addparam('key', enc(book))
556 557 part.addparam('old', enc(old))
557 558 part.addparam('new', enc(new))
558 559 action = 'update'
559 560 if not old:
560 561 action = 'export'
561 562 elif not new:
562 563 action = 'delete'
563 564 part2book.append((part.id, book, action))
564 565
565 566
566 567 def handlereply(op):
567 568 ui = pushop.ui
568 569 for partid, book, action in part2book:
569 570 partrep = op.records.getreplies(partid)
570 571 results = partrep['pushkey']
571 572 assert len(results) <= 1
572 573 if not results:
573 574 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
574 575 else:
575 576 ret = int(results[0]['return'])
576 577 if ret:
577 578 ui.status(bookmsgmap[action][0] % book)
578 579 else:
579 580 ui.warn(bookmsgmap[action][1] % book)
580 581 if pushop.bkresult is not None:
581 582 pushop.bkresult = 1
582 583 return handlereply
583 584
584 585
585 586 def _pushbundle2(pushop):
586 587 """push data to the remote using bundle2
587 588
588 589 The only currently supported type of data is changegroup but this will
589 590 evolve in the future."""
590 591 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
591 592 pushback = (pushop.trmanager
592 593 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
593 594
594 595 # create reply capability
595 596 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
596 597 allowpushback=pushback))
597 598 bundler.newpart('replycaps', data=capsblob)
598 599 replyhandlers = []
599 600 for partgenname in b2partsgenorder:
600 601 partgen = b2partsgenmapping[partgenname]
601 602 ret = partgen(pushop, bundler)
602 603 if callable(ret):
603 604 replyhandlers.append(ret)
604 605 # do not push if nothing to push
605 606 if bundler.nbparts <= 1:
606 607 return
607 608 stream = util.chunkbuffer(bundler.getchunks())
608 609 try:
609 610 reply = pushop.remote.unbundle(stream, ['force'], 'push')
610 611 except error.BundleValueError, exc:
611 612 raise util.Abort('missing support for %s' % exc)
612 613 try:
613 614 trgetter = None
614 615 if pushback:
615 616 trgetter = pushop.trmanager.transaction
616 617 op = bundle2.processbundle(pushop.repo, reply, trgetter)
617 618 except error.BundleValueError, exc:
618 619 raise util.Abort('missing support for %s' % exc)
619 620 for rephand in replyhandlers:
620 621 rephand(op)
621 622
622 623 def _pushchangeset(pushop):
623 624 """Make the actual push of changeset bundle to remote repo"""
624 625 if 'changesets' in pushop.stepsdone:
625 626 return
626 627 pushop.stepsdone.add('changesets')
627 628 if not _pushcheckoutgoing(pushop):
628 629 return
629 630 pushop.repo.prepushoutgoinghooks(pushop.repo,
630 631 pushop.remote,
631 632 pushop.outgoing)
632 633 outgoing = pushop.outgoing
633 634 unbundle = pushop.remote.capable('unbundle')
634 635 # TODO: get bundlecaps from remote
635 636 bundlecaps = None
636 637 # create a changegroup from local
637 638 if pushop.revs is None and not (outgoing.excluded
638 639 or pushop.repo.changelog.filteredrevs):
639 640 # push everything,
640 641 # use the fast path, no race possible on push
641 642 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
642 643 cg = changegroup.getsubset(pushop.repo,
643 644 outgoing,
644 645 bundler,
645 646 'push',
646 647 fastpath=True)
647 648 else:
648 649 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
649 650 bundlecaps)
650 651
651 652 # apply changegroup to remote
652 653 if unbundle:
653 654 # local repo finds heads on server, finds out what
654 655 # revs it must push. Once revs are transferred, if the server
655 656 # finds it has different heads (someone else won
656 657 # commit/push race), server aborts.
657 658 if pushop.force:
658 659 remoteheads = ['force']
659 660 else:
660 661 remoteheads = pushop.remoteheads
661 662 # ssh: return remote's addchangegroup()
662 663 # http: return remote's addchangegroup() or 0 for error
663 664 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
664 665 pushop.repo.url())
665 666 else:
666 667 # we return an integer indicating remote head count
667 668 # change
668 669 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
669 670 pushop.repo.url())
670 671
671 672 def _pushsyncphase(pushop):
672 673 """synchronise phase information locally and remotely"""
673 674 cheads = pushop.commonheads
674 675 # even when we don't push, exchanging phase data is useful
675 676 remotephases = pushop.remote.listkeys('phases')
676 677 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
677 678 and remotephases # server supports phases
678 679 and pushop.cgresult is None # nothing was pushed
679 680 and remotephases.get('publishing', False)):
680 681 # When:
681 682 # - this is a subrepo push
682 683 # - and the remote supports phases
683 684 # - and no changeset was pushed
684 685 # - and remote is publishing
685 686 # We may be in the issue 3871 case!
686 687 # We drop the possible phase synchronisation done by
687 688 # courtesy to publish changesets possibly locally draft
688 689 # on the remote.
689 690 remotephases = {'publishing': 'True'}
690 691 if not remotephases: # old server, or public-only reply from a non-publishing one
691 692 _localphasemove(pushop, cheads)
692 693 # don't push any phase data as there is nothing to push
693 694 else:
694 695 ana = phases.analyzeremotephases(pushop.repo, cheads,
695 696 remotephases)
696 697 pheads, droots = ana
697 698 ### Apply remote phase on local
698 699 if remotephases.get('publishing', False):
699 700 _localphasemove(pushop, cheads)
700 701 else: # publish = False
701 702 _localphasemove(pushop, pheads)
702 703 _localphasemove(pushop, cheads, phases.draft)
703 704 ### Apply local phase on remote
704 705
705 706 if pushop.cgresult:
706 707 if 'phases' in pushop.stepsdone:
707 708 # phases already pushed through bundle2
708 709 return
709 710 outdated = pushop.outdatedphases
710 711 else:
711 712 outdated = pushop.fallbackoutdatedphases
712 713
713 714 pushop.stepsdone.add('phases')
714 715
715 716 # filter heads already turned public by the push
716 717 outdated = [c for c in outdated if c.node() not in pheads]
717 718 # fallback to independent pushkey command
718 719 for newremotehead in outdated:
719 720 r = pushop.remote.pushkey('phases',
720 721 newremotehead.hex(),
721 722 str(phases.draft),
722 723 str(phases.public))
723 724 if not r:
724 725 pushop.ui.warn(_('updating %s to public failed!\n')
725 726 % newremotehead)
726 727
727 728 def _localphasemove(pushop, nodes, phase=phases.public):
728 729 """move <nodes> to <phase> in the local source repo"""
729 730 if pushop.trmanager:
730 731 phases.advanceboundary(pushop.repo,
731 732 pushop.trmanager.transaction(),
732 733 phase,
733 734 nodes)
734 735 else:
735 736 # repo is not locked, do not change any phases!
736 737 # Inform the user that phases should have been moved when
737 738 # applicable.
738 739 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
739 740 phasestr = phases.phasenames[phase]
740 741 if actualmoves:
741 742 pushop.ui.status(_('cannot lock source repo, skipping '
742 743 'local %s phase update\n') % phasestr)
743 744
744 745 def _pushobsolete(pushop):
745 746 """utility function to push obsolete markers to a remote"""
746 747 if 'obsmarkers' in pushop.stepsdone:
747 748 return
748 749 pushop.ui.debug('try to push obsolete markers to remote\n')
749 750 repo = pushop.repo
750 751 remote = pushop.remote
751 752 pushop.stepsdone.add('obsmarkers')
752 753 if pushop.outobsmarkers:
753 754 rslts = []
754 remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
755 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
755 756 for key in sorted(remotedata, reverse=True):
756 757 # reverse sort to ensure we end with dump0
757 758 data = remotedata[key]
758 759 rslts.append(remote.pushkey('obsolete', key, '', data))
759 760 if [r for r in rslts if not r]:
760 761 msg = _('failed to push some obsolete markers!\n')
761 762 repo.ui.warn(msg)
762 763
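In the pushkey fallback above, obsolete._pushkeyescape splits the markers into base85-encoded chunks keyed 'dump0', 'dump1', ... (see the matching decode in _pullobsolete below), and the reverse sort over the key names makes the sequence of pushkey calls end with 'dump0':

keys = ['dump0', 'dump1', 'dump2']
assert sorted(keys, reverse=True) == ['dump2', 'dump1', 'dump0']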
763 764 def _pushbookmark(pushop):
764 765 """Update bookmark position on remote"""
765 766 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
766 767 return
767 768 pushop.stepsdone.add('bookmarks')
768 769 ui = pushop.ui
769 770 remote = pushop.remote
770 771
771 772 for b, old, new in pushop.outbookmarks:
772 773 action = 'update'
773 774 if not old:
774 775 action = 'export'
775 776 elif not new:
776 777 action = 'delete'
777 778 if remote.pushkey('bookmarks', b, old, new):
778 779 ui.status(bookmsgmap[action][0] % b)
779 780 else:
780 781 ui.warn(bookmsgmap[action][1] % b)
781 782 # discovery can have set the value from an invalid entry
782 783 if pushop.bkresult is not None:
783 784 pushop.bkresult = 1
784 785
785 786 class pulloperation(object):
786 787 """An object that represents a single pull operation
787 788
788 789 Its purpose is to carry pull-related state and common operations.
789 790
790 791 A new one should be created at the beginning of each pull and discarded
791 792 afterward.
792 793 """
793 794
794 795 def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
795 796 # repo we pull into
796 797 self.repo = repo
797 798 # repo we pull from
798 799 self.remote = remote
800 801 # revisions we try to pull (None is "all")
800 801 self.heads = heads
802 803 # bookmarks pulled explicitly
802 803 self.explicitbookmarks = bookmarks
803 804 # do we force pull?
804 805 self.force = force
805 806 # transaction manager
806 807 self.trmanager = None
808 809 # set of common changesets between local and remote before pull
808 809 self.common = None
810 811 # set of pulled heads
810 811 self.rheads = None
812 813 # list of missing changesets to fetch remotely
812 813 self.fetch = None
813 814 # remote bookmarks data
814 815 self.remotebookmarks = None
815 816 # result of changegroup pulling (used as return code by pull)
816 817 self.cgresult = None
818 819 # list of steps already done
818 819 self.stepsdone = set()
819 820
820 821 @util.propertycache
821 822 def pulledsubset(self):
822 823 """heads of the set of changesets targeted by the pull"""
823 824 # compute target subset
824 825 if self.heads is None:
825 826 # We pulled everything possible
826 827 # sync on everything common
827 828 c = set(self.common)
828 829 ret = list(self.common)
829 830 for n in self.rheads:
830 831 if n not in c:
831 832 ret.append(n)
832 833 return ret
833 834 else:
834 835 # We pulled a specific subset
835 836 # sync on this subset
836 837 return self.heads
837 838
838 839 def gettransaction(self):
839 840 # deprecated; talk to trmanager directly
840 841 return self.trmanager.transaction()
841 842
842 843 class transactionmanager(object):
843 844 """An object to manage the life cycle of a transaction
844 845
845 846 It creates the transaction on demand and calls the appropriate hooks when
846 847 closing the transaction."""
847 848 def __init__(self, repo, source, url):
848 849 self.repo = repo
849 850 self.source = source
850 851 self.url = url
851 852 self._tr = None
852 853
853 854 def transaction(self):
854 855 """Return an open transaction object, constructing if necessary"""
855 856 if not self._tr:
856 857 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
857 858 self._tr = self.repo.transaction(trname)
858 859 self._tr.hookargs['source'] = self.source
859 860 self._tr.hookargs['url'] = self.url
860 861 return self._tr
861 862
862 863 def close(self):
863 864 """close transaction if created"""
864 865 if self._tr is not None:
865 866 self._tr.close()
866 867
867 868 def release(self):
868 869 """release transaction if created"""
869 870 if self._tr is not None:
870 871 self._tr.release()
871 872
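The close/release pair follows Mercurial's usual transaction idiom. A sketch of the life cycle as push() and pull() drive it, where repo is assumed to be an existing localrepository and the URL is hypothetical:

trmanager = transactionmanager(repo, 'pull', 'ssh://example.com/repo')
try:
    tr = trmanager.transaction()   # lazily opened on first use
    # ... apply changegroup, phases, bookmarks, obsmarkers ...
    trmanager.close()              # commit, if one was opened
finally:
    trmanager.release()            # abort if close() never ran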
872 873 def pull(repo, remote, heads=None, force=False, bookmarks=()):
873 874 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
874 875 if pullop.remote.local():
875 876 missing = set(pullop.remote.requirements) - pullop.repo.supported
876 877 if missing:
877 878 msg = _("required features are not"
878 879 " supported in the destination:"
879 880 " %s") % (', '.join(sorted(missing)))
880 881 raise util.Abort(msg)
881 882
882 883 pullop.remotebookmarks = remote.listkeys('bookmarks')
883 884 lock = pullop.repo.lock()
884 885 try:
885 886 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
886 887 _pulldiscovery(pullop)
887 888 if _canusebundle2(pullop):
888 889 _pullbundle2(pullop)
889 890 _pullchangeset(pullop)
890 891 _pullphase(pullop)
891 892 _pullbookmarks(pullop)
892 893 _pullobsolete(pullop)
893 894 pullop.trmanager.close()
894 895 finally:
895 896 pullop.trmanager.release()
896 897 lock.release()
897 898
898 899 return pullop
899 900
900 901 # list of steps to perform discovery before pull
901 902 pulldiscoveryorder = []
902 903
903 904 # Mapping between step name and function
904 905 #
905 906 # This exists to help extensions wrap steps if necessary
906 907 pulldiscoverymapping = {}
907 908
908 909 def pulldiscovery(stepname):
909 910 """decorator for function performing discovery before pull
910 911
911 912 The function is added to the step -> function mapping and appended to the
912 913 list of steps. Beware that decorated functions will be added in order (this
913 914 may matter).
914 915
915 916 You can only use this decorator for a new step; if you want to wrap a step
916 917 from an extension, change the pulldiscovery dictionary directly."""
917 918 def dec(func):
918 919 assert stepname not in pulldiscoverymapping
919 920 pulldiscoverymapping[stepname] = func
920 921 pulldiscoveryorder.append(stepname)
921 922 return func
922 923 return dec
923 924
924 925 def _pulldiscovery(pullop):
925 926 """Run all discovery steps"""
926 927 for stepname in pulldiscoveryorder:
927 928 step = pulldiscoverymapping[stepname]
928 929 step(pullop)
929 930
930 931 @pulldiscovery('changegroup')
931 932 def _pulldiscoverychangegroup(pullop):
932 933 """discovery phase for the pull
933 934
934 935 Currently handles changeset discovery only; will change to handle all discovery
935 936 at some point."""
936 937 tmp = discovery.findcommonincoming(pullop.repo,
937 938 pullop.remote,
938 939 heads=pullop.heads,
939 940 force=pullop.force)
940 941 common, fetch, rheads = tmp
941 942 nm = pullop.repo.unfiltered().changelog.nodemap
942 943 if fetch and rheads:
943 944 # If a remote head is filtered locally, let's drop it from the unknown
944 945 # remote heads and put it back in common.
945 946 #
946 947 # This is a hackish solution to catch most of the "common but locally
947 948 # hidden" situations. We do not perform discovery on the unfiltered
948 949 # repository because it ends up doing a pathological amount of round
949 950 # trips for a huge amount of changesets we do not care about.
950 951 #
951 952 # If a set of such "common but filtered" changesets exists on the server
952 953 # but does not include a remote head, we'll not be able to detect it.
953 954 scommon = set(common)
954 955 filteredrheads = []
955 956 for n in rheads:
956 957 if n in nm:
957 958 if n not in scommon:
958 959 common.append(n)
959 960 else:
960 961 filteredrheads.append(n)
961 962 if not filteredrheads:
962 963 fetch = []
963 964 rheads = filteredrheads
964 965 pullop.common = common
965 966 pullop.fetch = fetch
966 967 pullop.rheads = rheads
967 968
968 969 def _pullbundle2(pullop):
969 970 """pull data using bundle2
970 971
971 972 For now, the only supported data is the changegroup."""
972 973 remotecaps = bundle2.bundle2caps(pullop.remote)
973 974 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
974 975 # pulling changegroup
975 976 pullop.stepsdone.add('changegroup')
976 977
977 978 kwargs['common'] = pullop.common
978 979 kwargs['heads'] = pullop.heads or pullop.rheads
979 980 kwargs['cg'] = pullop.fetch
980 981 if 'listkeys' in remotecaps:
981 982 kwargs['listkeys'] = ['phase', 'bookmarks']
982 983 if not pullop.fetch:
983 984 pullop.repo.ui.status(_("no changes found\n"))
984 985 pullop.cgresult = 0
985 986 else:
986 987 if pullop.heads is None and list(pullop.common) == [nullid]:
987 988 pullop.repo.ui.status(_("requesting all changes\n"))
988 989 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
989 990 remoteversions = bundle2.obsmarkersversion(remotecaps)
990 991 if obsolete.commonversion(remoteversions) is not None:
991 992 kwargs['obsmarkers'] = True
992 993 pullop.stepsdone.add('obsmarkers')
993 994 _pullbundle2extraprepare(pullop, kwargs)
994 995 bundle = pullop.remote.getbundle('pull', **kwargs)
995 996 try:
996 997 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
997 998 except error.BundleValueError, exc:
998 999 raise util.Abort('missing support for %s' % exc)
999 1000
1000 1001 if pullop.fetch:
1001 1002 results = [cg['return'] for cg in op.records['changegroup']]
1002 1003 pullop.cgresult = changegroup.combineresults(results)
1003 1004
1004 1005 # processing phases change
1005 1006 for namespace, value in op.records['listkeys']:
1006 1007 if namespace == 'phases':
1007 1008 _pullapplyphases(pullop, value)
1008 1009
1009 1010 # processing bookmark update
1010 1011 for namespace, value in op.records['listkeys']:
1011 1012 if namespace == 'bookmarks':
1012 1013 pullop.remotebookmarks = value
1013 1014 _pullbookmarks(pullop)
1014 1015
1015 1016 def _pullbundle2extraprepare(pullop, kwargs):
1016 1017 """hook function so that extensions can extend the getbundle call"""
1017 1018 pass
1018 1019
1019 1020 def _pullchangeset(pullop):
1020 1021 """pull changeset from unbundle into the local repo"""
1021 1022 # We delay opening the transaction as late as possible so we
1022 1023 # don't open a transaction for nothing and don't break a future useful
1023 1024 # rollback call
1024 1025 if 'changegroup' in pullop.stepsdone:
1025 1026 return
1026 1027 pullop.stepsdone.add('changegroup')
1027 1028 if not pullop.fetch:
1028 1029 pullop.repo.ui.status(_("no changes found\n"))
1029 1030 pullop.cgresult = 0
1030 1031 return
1031 1032 pullop.gettransaction()
1032 1033 if pullop.heads is None and list(pullop.common) == [nullid]:
1033 1034 pullop.repo.ui.status(_("requesting all changes\n"))
1034 1035 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1035 1036 # issue1320, avoid a race if remote changed after discovery
1036 1037 pullop.heads = pullop.rheads
1037 1038
1038 1039 if pullop.remote.capable('getbundle'):
1039 1040 # TODO: get bundlecaps from remote
1040 1041 cg = pullop.remote.getbundle('pull', common=pullop.common,
1041 1042 heads=pullop.heads or pullop.rheads)
1042 1043 elif pullop.heads is None:
1043 1044 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1044 1045 elif not pullop.remote.capable('changegroupsubset'):
1045 1046 raise util.Abort(_("partial pull cannot be done because "
1046 1047 "other repository doesn't support "
1047 1048 "changegroupsubset."))
1048 1049 else:
1049 1050 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1050 1051 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1051 1052 pullop.remote.url())
1052 1053
1053 1054 def _pullphase(pullop):
1054 1055 # Get remote phases data from remote
1055 1056 if 'phases' in pullop.stepsdone:
1056 1057 return
1057 1058 remotephases = pullop.remote.listkeys('phases')
1058 1059 _pullapplyphases(pullop, remotephases)
1059 1060
1060 1061 def _pullapplyphases(pullop, remotephases):
1061 1062 """apply phase movement from observed remote state"""
1062 1063 if 'phases' in pullop.stepsdone:
1063 1064 return
1064 1065 pullop.stepsdone.add('phases')
1065 1066 publishing = bool(remotephases.get('publishing', False))
1066 1067 if remotephases and not publishing:
1067 1068 # remote is new and non-publishing
1068 1069 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1069 1070 pullop.pulledsubset,
1070 1071 remotephases)
1071 1072 dheads = pullop.pulledsubset
1072 1073 else:
1073 1074 # Remote is old or publishing; all common changesets
1074 1075 # should be seen as public
1075 1076 pheads = pullop.pulledsubset
1076 1077 dheads = []
1077 1078 unfi = pullop.repo.unfiltered()
1078 1079 phase = unfi._phasecache.phase
1079 1080 rev = unfi.changelog.nodemap.get
1080 1081 public = phases.public
1081 1082 draft = phases.draft
1082 1083
1083 1084 # exclude changesets already public locally and update the others
1084 1085 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1085 1086 if pheads:
1086 1087 tr = pullop.gettransaction()
1087 1088 phases.advanceboundary(pullop.repo, tr, public, pheads)
1088 1089
1089 1090 # exclude changesets already draft locally and update the others
1090 1091 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1091 1092 if dheads:
1092 1093 tr = pullop.gettransaction()
1093 1094 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1094 1095
1095 1096 def _pullbookmarks(pullop):
1096 1097 """process the remote bookmark information to update the local one"""
1097 1098 if 'bookmarks' in pullop.stepsdone:
1098 1099 return
1099 1100 pullop.stepsdone.add('bookmarks')
1100 1101 repo = pullop.repo
1101 1102 remotebookmarks = pullop.remotebookmarks
1102 1103 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1103 1104 pullop.remote.url(),
1104 1105 pullop.gettransaction,
1105 1106 explicit=pullop.explicitbookmarks)
1106 1107
1107 1108 def _pullobsolete(pullop):
1108 1109 """utility function to pull obsolete markers from a remote
1109 1110
1110 1111 `gettransaction` is a function that returns the pull transaction, creating
1111 1112 one if necessary. We return the transaction to inform the calling code that
1112 1113 a new transaction has been created (when applicable).
1113 1114 
1114 1115 Exists mostly to allow overriding for experimentation purposes"""
1115 1116 if 'obsmarkers' in pullop.stepsdone:
1116 1117 return
1117 1118 pullop.stepsdone.add('obsmarkers')
1118 1119 tr = None
1119 1120 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1120 1121 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1121 1122 remoteobs = pullop.remote.listkeys('obsolete')
1122 1123 if 'dump0' in remoteobs:
1123 1124 tr = pullop.gettransaction()
1124 1125 for key in sorted(remoteobs, reverse=True):
1125 1126 if key.startswith('dump'):
1126 1127 data = base85.b85decode(remoteobs[key])
1127 1128 pullop.repo.obsstore.mergemarkers(tr, data)
1128 1129 pullop.repo.invalidatevolatilesets()
1129 1130 return tr
1130 1131
1131 1132 def caps20to10(repo):
1132 1133 """return a set with appropriate options to use bundle20 during getbundle"""
1133 1134 caps = set(['HG20'])
1134 1135 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1135 1136 caps.add('bundle2=' + urllib.quote(capsblob))
1136 1137 return caps
1137 1138
1138 1139 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1139 1140 getbundle2partsorder = []
1140 1141
1141 1142 # Mapping between step name and function
1142 1143 #
1143 1144 # This exists to help extensions wrap steps if necessary
1144 1145 getbundle2partsmapping = {}
1145 1146
1146 1147 def getbundle2partsgenerator(stepname, idx=None):
1147 1148 """decorator for function generating bundle2 part for getbundle
1148 1149
1149 1150 The function is added to the step -> function mapping and appended to the
1150 1151 list of steps. Beware that decorated functions will be added in order
1151 1152 (this may matter).
1152 1153
1153 1154 You can only use this decorator for new steps; if you want to wrap a step
1154 1155 from an extension, change the getbundle2partsmapping dictionary directly."""
1155 1156 def dec(func):
1156 1157 assert stepname not in getbundle2partsmapping
1157 1158 getbundle2partsmapping[stepname] = func
1158 1159 if idx is None:
1159 1160 getbundle2partsorder.append(stepname)
1160 1161 else:
1161 1162 getbundle2partsorder.insert(idx, stepname)
1162 1163 return func
1163 1164 return dec
1164 1165
1165 1166 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1166 1167 **kwargs):
1167 1168 """return a full bundle (with potentially multiple kinds of parts)
1168 1169
1169 1170 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1170 1171 passed. For now, the bundle can contain only changegroup, but this will
1171 1172 change when more part types become available for bundle2.
1172 1173
1173 1174 This is different from changegroup.getchangegroup that only returns an HG10
1174 1175 changegroup bundle. They may eventually get reunited in the future when we
1175 1176 have a clearer idea of the API we want to use to query different data.
1176 1177
1177 1178 The implementation is at a very early stage and will get massive rework
1178 1179 when the API of bundle is refined.
1179 1180 """
1180 1181 # bundle10 case
1181 1182 usebundle2 = False
1182 1183 if bundlecaps is not None:
1183 1184 usebundle2 = util.any((cap.startswith('HG2') for cap in bundlecaps))
1184 1185 if not usebundle2:
1185 1186 if bundlecaps and not kwargs.get('cg', True):
1186 1187 raise ValueError(_('request for bundle10 must include changegroup'))
1187 1188
1188 1189 if kwargs:
1189 1190 raise ValueError(_('unsupported getbundle arguments: %s')
1190 1191 % ', '.join(sorted(kwargs.keys())))
1191 1192 return changegroup.getchangegroup(repo, source, heads=heads,
1192 1193 common=common, bundlecaps=bundlecaps)
1193 1194
1194 1195 # bundle20 case
1195 1196 b2caps = {}
1196 1197 for bcaps in bundlecaps:
1197 1198 if bcaps.startswith('bundle2='):
1198 1199 blob = urllib.unquote(bcaps[len('bundle2='):])
1199 1200 b2caps.update(bundle2.decodecaps(blob))
1200 1201 bundler = bundle2.bundle20(repo.ui, b2caps)
1201 1202
1202 1203 kwargs['heads'] = heads
1203 1204 kwargs['common'] = common
1204 1205
1205 1206 for name in getbundle2partsorder:
1206 1207 func = getbundle2partsmapping[name]
1207 1208 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1208 1209 **kwargs)
1209 1210
1210 1211 return util.chunkbuffer(bundler.getchunks())
1211 1212
1212 1213 @getbundle2partsgenerator('changegroup')
1213 1214 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1214 1215 b2caps=None, heads=None, common=None, **kwargs):
1215 1216 """add a changegroup part to the requested bundle"""
1216 1217 cg = None
1217 1218 if kwargs.get('cg', True):
1218 1219 # build changegroup bundle here.
1219 1220 version = None
1220 1221 cgversions = b2caps.get('changegroup')
1221 1222 if not cgversions: # 3.1 and 3.2 ship with an empty value
1222 1223 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1223 1224 common=common,
1224 1225 bundlecaps=bundlecaps)
1225 1226 else:
1226 1227 cgversions = [v for v in cgversions if v in changegroup.packermap]
1227 1228 if not cgversions:
1228 1229 raise ValueError(_('no common changegroup version'))
1229 1230 version = max(cgversions)
1230 1231 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1231 1232 common=common,
1232 1233 bundlecaps=bundlecaps,
1233 1234 version=version)
1234 1235
1235 1236 if cg:
1236 1237 part = bundler.newpart('changegroup', data=cg)
1237 1238 if version is not None:
1238 1239 part.addparam('version', version)
1239 1240
1240 1241 @getbundle2partsgenerator('listkeys')
1241 1242 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1242 1243 b2caps=None, **kwargs):
1243 1244 """add parts containing listkeys namespaces to the requested bundle"""
1244 1245 listkeys = kwargs.get('listkeys', ())
1245 1246 for namespace in listkeys:
1246 1247 part = bundler.newpart('listkeys')
1247 1248 part.addparam('namespace', namespace)
1248 1249 keys = repo.listkeys(namespace).items()
1249 1250 part.data = pushkey.encodekeys(keys)
1250 1251
1251 1252 @getbundle2partsgenerator('obsmarkers')
1252 1253 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1253 1254 b2caps=None, heads=None, **kwargs):
1254 1255 """add an obsolescence markers part to the requested bundle"""
1255 1256 if kwargs.get('obsmarkers', False):
1256 1257 if heads is None:
1257 1258 heads = repo.heads()
1258 1259 subset = [c.node() for c in repo.set('::%ln', heads)]
1259 1260 markers = repo.obsstore.relevantmarkers(subset)
1261 markers = sorted(markers)
1260 1262 buildobsmarkerspart(bundler, markers)
1261 1263
1262 1264 def check_heads(repo, their_heads, context):
1263 1265 """check if the heads of a repo have been modified
1264 1266
1265 1267 Used by peer for unbundling.
1266 1268 """
1267 1269 heads = repo.heads()
1268 1270 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1269 1271 if not (their_heads == ['force'] or their_heads == heads or
1270 1272 their_heads == ['hashed', heads_hash]):
1271 1273 # someone else committed/pushed/unbundled while we
1272 1274 # were transferring data
1273 1275 raise error.PushRaced('repository changed while %s - '
1274 1276 'please try again' % context)
1275 1277
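The 'hashed' form lets a client commit to the heads it saw without shipping the full list back; util.sha1 is effectively hashlib.sha1, so the check can be reproduced directly with hypothetical node values:

import hashlib
heads = ['\x01' * 20, '\x02' * 20]    # hypothetical 20-byte node ids
heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
# their_heads=['hashed', heads_hash] then passes check_heads() as long as
# repo.heads() is unchanged when the bundle arrives.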
1276 1278 def unbundle(repo, cg, heads, source, url):
1277 1279 """Apply a bundle to a repo.
1278 1280
1279 1281 This function makes sure the repo is locked during the application and has
1280 1282 a mechanism to check that no push race occurred between the creation of the
1281 1283 bundle and its application.
1282 1284
1283 1285 If the push was raced, a PushRaced exception is raised."""
1284 1286 r = 0
1285 1287 # need a transaction when processing a bundle2 stream
1286 1288 wlock = lock = tr = None
1287 1289 recordout = None
1288 1290 # quick fix for output mismatch with bundle2 in 3.4
1289 1291 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1290 1292 False)
1291 1293 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1292 1294 captureoutput = True
1293 1295 try:
1294 1296 check_heads(repo, heads, 'uploading changes')
1295 1297 # push can proceed
1296 1298 if util.safehasattr(cg, 'params'):
1297 1299 r = None
1298 1300 try:
1299 1301 wlock = repo.wlock()
1300 1302 lock = repo.lock()
1301 1303 tr = repo.transaction(source)
1302 1304 tr.hookargs['source'] = source
1303 1305 tr.hookargs['url'] = url
1304 1306 tr.hookargs['bundle2'] = '1'
1305 1307 op = bundle2.bundleoperation(repo, lambda: tr,
1306 1308 captureoutput=captureoutput)
1307 1309 try:
1308 1310 r = bundle2.processbundle(repo, cg, op=op)
1309 1311 finally:
1310 1312 r = op.reply
1311 1313 if captureoutput and r is not None:
1312 1314 repo.ui.pushbuffer(error=True, subproc=True)
1313 1315 def recordout(output):
1314 1316 r.newpart('output', data=output, mandatory=False)
1315 1317 tr.close()
1316 1318 except Exception, exc:
1317 1319 exc.duringunbundle2 = True
1318 1320 if captureoutput and r is not None:
1319 1321 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1320 1322 def recordout(output):
1321 1323 part = bundle2.bundlepart('output', data=output,
1322 1324 mandatory=False)
1323 1325 parts.append(part)
1324 1326 raise
1325 1327 else:
1326 1328 lock = repo.lock()
1327 1329 r = changegroup.addchangegroup(repo, cg, source, url)
1328 1330 finally:
1329 1331 lockmod.release(tr, lock, wlock)
1330 1332 if recordout is not None:
1331 1333 recordout(repo.ui.popbuffer())
1332 1334 return r
@@ -1,1252 +1,1252 @@
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that use changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46 
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (A',)) and (A, (A'',))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are kept in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. The marker format depends on the
67 67 version. See the comment associated with each format for details.
68 68
69 69 """
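The examples above, written out as the (precursor, successors) pairs the rest of this module manipulates, with identifiers shortened to single bytes for readability (real ones are 20-byte nodes):

A, A2, A3, B, C = '\xaa', '\xa2', '\xa3', '\xbb', '\xcc'   # node stand-ins
rewrite = (A, (A2,))                  # A rewritten as A'
fold = [(A, (C,)), (B, (C,))]         # A and B folded into C
prune = (A, ())                       # A pruned from the graph
split = (A, (B, C))                   # A split into B and C, one marker
divergence = [(A, (A2,)), (A, (A3,))] # two independent rewrites of A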
70 70 import struct
71 71 import util, base85, node, parsers
72 72 import phases
73 73 from i18n import _
74 74
75 75 _pack = struct.pack
76 76 _unpack = struct.unpack
77 77 _calcsize = struct.calcsize
78 78 propertycache = util.propertycache
79 79
80 80 # the obsolete feature is not mature enough to be enabled by default.
81 81 # you have to rely on a third party extension to enable this.
82 82 _enabled = False
83 83
84 84 # Options for obsolescence
85 85 createmarkersopt = 'createmarkers'
86 86 allowunstableopt = 'allowunstable'
87 87 exchangeopt = 'exchange'
88 88
89 89 ### obsolescence marker flag
90 90
91 91 ## bumpedfix flag
92 92 #
93 93 # When a changeset A' succeeds a changeset A which became public, we call A'
94 94 # "bumped" because it's a successor of a public changeset
95 95 #
96 96 # o A' (bumped)
97 97 # |`:
98 98 # | o A
99 99 # |/
100 100 # o Z
101 101 #
102 102 # The way to solve this situation is to create a new changeset Ad as a child
103 103 # of A. This changeset has the same content as A'. So the diff from A to A'
104 104 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
105 105 #
106 106 # o Ad
107 107 # |`:
108 108 # | x A'
109 109 # |'|
110 110 # o | A
111 111 # |/
112 112 # o Z
113 113 #
114 114 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
115 115 # as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
116 116 # This flag means that the successor expresses the changes between the public
117 117 # and bumped versions and fixes the situation, breaking the transitivity of
118 118 # "bumped" here.
119 119 bumpedfix = 1
120 120 usingsha256 = 2
121 121
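# A minimal illustrative helper (hypothetical, not part of the original
# module): the flags above combine bitwise in a marker's flag field, which
# is the third entry of a raw marker tuple.
def markerflagnames(rawmarker):
    # rawmarker is a (prec, succs, flags, meta, date, parents) tuple
    names = []
    if rawmarker[2] & bumpedfix:
        names.append('bumpedfix')    # successor repairs a "bumped" changeset
    if rawmarker[2] & usingsha256:
        names.append('usingsha256')  # node fields are 32 bytes wide
    return names
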
122 122 ## Parsing and writing of version "0"
123 123 #
124 124 # The header is followed by the markers. Each marker is made of:
125 125 #
126 126 # - 1 uint8: number of new changesets "N", can be zero.
127 127 #
128 128 # - 1 uint32: metadata size "M" in bytes.
129 129 #
130 130 # - 1 byte: a bit field. It is reserved for flags used in common
131 131 # obsolete marker operations, to avoid repeated decoding of metadata
132 132 # entries.
133 133 #
134 134 # - 20 bytes: obsoleted changeset identifier.
135 135 #
136 136 # - N*20 bytes: new changesets identifiers.
137 137 #
138 138 # - M bytes: metadata as a sequence of nul-terminated strings. Each
139 139 # string contains a key and a value, separated by a colon ':', without
140 140 # additional encoding. Keys cannot contain '\0' or ':' and values
141 141 # cannot contain '\0'.
142 142 _fm0version = 0
143 143 _fm0fixed = '>BIB20s'
144 144 _fm0node = '20s'
145 145 _fm0fsize = _calcsize(_fm0fixed)
146 146 _fm0fnodesize = _calcsize(_fm0node)
147 147
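# A small illustrative helper (hypothetical): with the fixed part above,
# a version-0 marker with N successors and M metadata bytes occupies
# _fm0fsize + N * _fm0fnodesize + M bytes on disk (_fm0fsize is 26).
def _fm0markersize(numsuc, mdsize):
    return _fm0fsize + numsuc * _fm0fnodesize + mdsize
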
148 148 def _fm0readmarkers(data, off):
149 149 # Loop on markers
150 150 l = len(data)
151 151 while off + _fm0fsize <= l:
152 152 # read fixed part
153 153 cur = data[off:off + _fm0fsize]
154 154 off += _fm0fsize
155 155 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
156 156 # read replacement
157 157 sucs = ()
158 158 if numsuc:
159 159 s = (_fm0fnodesize * numsuc)
160 160 cur = data[off:off + s]
161 161 sucs = _unpack(_fm0node * numsuc, cur)
162 162 off += s
163 163 # read metadata
164 164 # (metadata will be decoded on demand)
165 165 metadata = data[off:off + mdsize]
166 166 if len(metadata) != mdsize:
167 167 raise util.Abort(_('parsing obsolete marker: metadata is too '
168 168 'short, %d bytes expected, got %d')
169 169 % (mdsize, len(metadata)))
170 170 off += mdsize
171 171 metadata = _fm0decodemeta(metadata)
172 172 try:
173 173 when, offset = metadata.pop('date', '0 0').split(' ')
174 174 date = float(when), int(offset)
175 175 except ValueError:
176 176 date = (0., 0)
177 177 parents = None
178 178 if 'p2' in metadata:
179 179 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
180 180 elif 'p1' in metadata:
181 181 parents = (metadata.pop('p1', None),)
182 182 elif 'p0' in metadata:
183 183 parents = ()
184 184 if parents is not None:
185 185 try:
186 186 parents = tuple(node.bin(p) for p in parents)
187 187 # if parent content is not a nodeid, drop the data
188 188 for p in parents:
189 189 if len(p) != 20:
190 190 parents = None
191 191 break
192 192 except TypeError:
193 193 # if content cannot be translated to nodeid drop the data.
194 194 parents = None
195 195
196 196 metadata = tuple(sorted(metadata.iteritems()))
197 197
198 198 yield (pre, sucs, flags, metadata, date, parents)
199 199
200 200 def _fm0encodeonemarker(marker):
201 201 pre, sucs, flags, metadata, date, parents = marker
202 202 if flags & usingsha256:
203 203 raise util.Abort(_('cannot handle sha256 with old obsstore format'))
204 204 metadata = dict(metadata)
205 205 time, tz = date
206 206 metadata['date'] = '%r %i' % (time, tz)
207 207 if parents is not None:
208 208 if not parents:
209 209 # mark that we explicitly recorded no parents
210 210 metadata['p0'] = ''
211 211 for i, p in enumerate(parents):
212 212 metadata['p%i' % (i + 1)] = node.hex(p)
213 213 metadata = _fm0encodemeta(metadata)
214 214 numsuc = len(sucs)
215 215 format = _fm0fixed + (_fm0node * numsuc)
216 216 data = [numsuc, len(metadata), flags, pre]
217 217 data.extend(sucs)
218 218 return _pack(format, *data) + metadata
219 219
220 220 def _fm0encodemeta(meta):
221 221 """Return the encoded version of a string-to-string metadata mapping.
222 222 
223 223 Assumes no ':' or '\0' in keys and no '\0' in values."""
224 224 for key, value in meta.iteritems():
225 225 if ':' in key or '\0' in key:
226 226 raise ValueError("':' and '\0' are forbidden in metadata keys")
227 227 if '\0' in value:
228 228 raise ValueError("'\0' is forbidden in metadata values")
229 229 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
230 230
231 231 def _fm0decodemeta(data):
232 232 """Return string to string dictionary from encoded version."""
233 233 d = {}
234 234 for l in data.split('\0'):
235 235 if l:
236 236 key, value = l.split(':', 1)
237 237 d[key] = value
238 238 return d
239 239
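# A round-trip sketch of the two helpers above (the sample mapping is
# hypothetical): keys are sorted and pairs are joined with '\0'.
_meta = {'user': 'alice', 'note': 'amended'}
assert _fm0encodemeta(_meta) == 'note:amended\x00user:alice'
assert _fm0decodemeta(_fm0encodemeta(_meta)) == _meta
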
240 240 ## Parsing and writing of version "1"
241 241 #
242 242 # The header is followed by the markers. Each marker is made of:
243 243 #
244 244 # - uint32: total size of the marker (including this field)
245 245 #
246 246 # - float64: date in seconds since epoch
247 247 #
248 248 # - int16: timezone offset in minutes
249 249 #
250 250 # - uint16: a bit field. It is reserved for flags used in common
251 251 # obsolete marker operations, to avoid repeated decoding of metadata
252 252 # entries.
253 253 #
254 254 # - uint8: number of successors "N", can be zero.
255 255 #
256 256 # - uint8: number of parents "P", can be zero.
257 257 #
258 258 # 0: parents data stored but no parent,
259 259 # 1: one parent stored,
260 260 # 2: two parents stored,
261 261 # 3: no parent data stored
262 262 #
263 263 # - uint8: number of metadata entries M
264 264 #
265 265 # - 20 or 32 bytes: precursor changeset identifier.
266 266 #
267 267 # - N*(20 or 32) bytes: successor changeset identifiers.
268 268 #
269 269 # - P*(20 or 32) bytes: parents of the precursor changeset.
270 270 #
271 271 # - M*(uint8, uint8): size of all metadata entries (key and value)
272 272 #
273 273 # - remaining bytes: the metadata, each (key, value) pair after the other.
274 274 _fm1version = 1
275 275 _fm1fixed = '>IdhHBBB20s'
276 276 _fm1nodesha1 = '20s'
277 277 _fm1nodesha256 = '32s'
278 278 _fm1nodesha1size = _calcsize(_fm1nodesha1)
279 279 _fm1nodesha256size = _calcsize(_fm1nodesha256)
280 280 _fm1fsize = _calcsize(_fm1fixed)
281 281 _fm1parentnone = 3
282 282 _fm1parentshift = 14
283 283 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
284 284 _fm1metapair = 'BB'
285 285 _fm1metapairsize = _calcsize('BB')
286 286
287 287 def _fm1purereadmarkers(data, off):
288 288 # make some global constants local for performance
289 289 noneflag = _fm1parentnone
290 290 sha2flag = usingsha256
291 291 sha1size = _fm1nodesha1size
292 292 sha2size = _fm1nodesha256size
293 293 sha1fmt = _fm1nodesha1
294 294 sha2fmt = _fm1nodesha256
295 295 metasize = _fm1metapairsize
296 296 metafmt = _fm1metapair
297 297 fsize = _fm1fsize
298 298 unpack = _unpack
299 299
300 300 # Loop on markers
301 301 stop = len(data) - _fm1fsize
302 302 ufixed = util.unpacker(_fm1fixed)
303 303
304 304 while off <= stop:
305 305 # read fixed part
306 306 o1 = off + fsize
307 307 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
308 308
309 309 if flags & sha2flag:
310 310 # FIXME: prec was read as a SHA1, needs to be amended
311 311
312 312 # read 0 or more successors
313 313 if numsuc == 1:
314 314 o2 = o1 + sha2size
315 315 sucs = (data[o1:o2],)
316 316 else:
317 317 o2 = o1 + sha2size * numsuc
318 318 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
319 319
320 320 # read parents
321 321 if numpar == noneflag:
322 322 o3 = o2
323 323 parents = None
324 324 elif numpar == 1:
325 325 o3 = o2 + sha2size
326 326 parents = (data[o2:o3],)
327 327 else:
328 328 o3 = o2 + sha2size * numpar
329 329 parents = unpack(sha2fmt * numpar, data[o2:o3])
330 330 else:
331 331 # read 0 or more successors
332 332 if numsuc == 1:
333 333 o2 = o1 + sha1size
334 334 sucs = (data[o1:o2],)
335 335 else:
336 336 o2 = o1 + sha1size * numsuc
337 337 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
338 338
339 339 # read parents
340 340 if numpar == noneflag:
341 341 o3 = o2
342 342 parents = None
343 343 elif numpar == 1:
344 344 o3 = o2 + sha1size
345 345 parents = (data[o2:o3],)
346 346 else:
347 347 o3 = o2 + sha1size * numpar
348 348 parents = unpack(sha1fmt * numpar, data[o2:o3])
349 349
350 350 # read metadata
351 351 off = o3 + metasize * nummeta
352 352 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
353 353 metadata = []
354 354 for idx in xrange(0, len(metapairsize), 2):
355 355 o1 = off + metapairsize[idx]
356 356 o2 = o1 + metapairsize[idx + 1]
357 357 metadata.append((data[off:o1], data[o1:o2]))
358 358 off = o2
359 359
360 360 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
361 361
362 362 def _fm1encodeonemarker(marker):
363 363 pre, sucs, flags, metadata, date, parents = marker
364 364 # determine node size
365 365 _fm1node = _fm1nodesha1
366 366 if flags & usingsha256:
367 367 _fm1node = _fm1nodesha256
368 368 numsuc = len(sucs)
369 369 numextranodes = numsuc
370 370 if parents is None:
371 371 numpar = _fm1parentnone
372 372 else:
373 373 numpar = len(parents)
374 374 numextranodes += numpar
375 375 formatnodes = _fm1node * numextranodes
376 376 formatmeta = _fm1metapair * len(metadata)
377 377 format = _fm1fixed + formatnodes + formatmeta
378 378 # tz is stored in minutes so we divide by 60
379 379 tz = date[1]//60
380 380 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
381 381 data.extend(sucs)
382 382 if parents is not None:
383 383 data.extend(parents)
384 384 totalsize = _calcsize(format)
385 385 for key, value in metadata:
386 386 lk = len(key)
387 387 lv = len(value)
388 388 data.append(lk)
389 389 data.append(lv)
390 390 totalsize += lk + lv
391 391 data[0] = totalsize
392 392 data = [_pack(format, *data)]
393 393 for key, value in metadata:
394 394 data.append(key)
395 395 data.append(value)
396 396 return ''.join(data)
397 397
398 398 def _fm1readmarkers(data, off):
399 399 native = getattr(parsers, 'fm1readmarkers', None)
400 400 if not native:
401 401 return _fm1purereadmarkers(data, off)
402 402 stop = len(data) - _fm1fsize
403 403 return native(data, off, stop)
404 404
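# A round-trip sketch for the version-1 codec above (the sample marker is
# hypothetical): one SHA-1 precursor, one successor, one metadata pair,
# epoch date, parents not recorded.
_m1 = ('\x11' * 20, ('\x22' * 20,), 0, (('user', 'alice'),), (0.0, 0), None)
assert list(_fm1purereadmarkers(_fm1encodeonemarker(_m1), 0)) == [_m1]
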
405 405 # mapping to read/write various marker formats
406 406 # <version> -> (decoder, encoder)
407 407 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
408 408 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
409 409
410 410 @util.nogc
411 411 def _readmarkers(data):
412 412 """Read and enumerate markers from raw data"""
413 413 off = 0
414 414 diskversion = _unpack('>B', data[off:off + 1])[0]
415 415 off += 1
416 416 if diskversion not in formats:
417 417 raise util.Abort(_('parsing obsolete marker: unknown version %r')
418 418 % diskversion)
419 419 return diskversion, formats[diskversion][0](data, off)
420 420
421 421 def encodemarkers(markers, addheader=False, version=_fm0version):
422 422 # Kept separate from flushmarkers(), it will be reused for
423 423 # marker exchange.
424 424 encodeone = formats[version][1]
425 425 if addheader:
426 426 yield _pack('>B', version)
427 427 for marker in markers:
428 428 yield encodeone(marker)
429 429
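# A stream-level round-trip sketch (sample marker hypothetical): with
# addheader=True, encodemarkers() produces exactly what _readmarkers()
# consumes.
_m0 = ('\x11' * 20, (), 0, (), (0.0, 0), None)  # a prune marker
_blob = ''.join(encodemarkers([_m0], addheader=True, version=_fm0version))
_v, _decoded = _readmarkers(_blob)
assert _v == _fm0version and list(_decoded) == [_m0]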
430 430
431 431 class marker(object):
432 432 """Wrap obsolete marker raw data"""
433 433
434 434 def __init__(self, repo, data):
435 435 # the repo argument will be used to create changectx in later version
436 436 self._repo = repo
437 437 self._data = data
438 438 self._decodedmeta = None
439 439
440 440 def __hash__(self):
441 441 return hash(self._data)
442 442
443 443 def __eq__(self, other):
444 444 if type(other) != type(self):
445 445 return False
446 446 return self._data == other._data
447 447
448 448 def precnode(self):
449 449 """Precursor changeset node identifier"""
450 450 return self._data[0]
451 451
452 452 def succnodes(self):
453 453 """List of successor changesets' node identifiers"""
454 454 return self._data[1]
455 455
456 456 def parentnodes(self):
457 457 """Parents of the precursors (None if not recorded)"""
458 458 return self._data[5]
459 459
460 460 def metadata(self):
461 461 """Decoded metadata dictionary"""
462 462 return dict(self._data[3])
463 463
464 464 def date(self):
465 465 """Creation date as (unixtime, offset)"""
466 466 return self._data[4]
467 467
468 468 def flags(self):
469 469 """The flags field of the marker"""
470 470 return self._data[2]
471 471
472 472 @util.nogc
473 473 def _addsuccessors(successors, markers):
474 474 for mark in markers:
475 475 successors.setdefault(mark[0], set()).add(mark)
476 476
477 477 @util.nogc
478 478 def _addprecursors(precursors, markers):
479 479 for mark in markers:
480 480 for suc in mark[1]:
481 481 precursors.setdefault(suc, set()).add(mark)
482 482
483 483 @util.nogc
484 484 def _addchildren(children, markers):
485 485 for mark in markers:
486 486 parents = mark[5]
487 487 if parents is not None:
488 488 for p in parents:
489 489 children.setdefault(p, set()).add(mark)
490 490
491 491 def _checkinvalidmarkers(markers):
492 492 """search for markers with invalid data and raise an error if needed
493 493 
494 494 Exists as a separate function so the evolve extension can provide more
495 495 subtle handling.
496 496 """
497 497 for mark in markers:
498 498 if node.nullid in mark[1]:
499 499 raise util.Abort(_('bad obsolescence marker detected: '
500 500 'invalid successors nullid'))
501 501
502 502 class obsstore(object):
503 503 """Store obsolete markers
504 504
505 505 Markers can be accessed with three mappings:
506 506 - precursors[x] -> set(markers on precursor edges of x)
507 507 - successors[x] -> set(markers on successor edges of x)
508 508 - children[x] -> set(markers on precursor edges of children(x))
509 509 """
510 510
511 511 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
512 512 # prec: nodeid, precursor changesets
513 513 # succs: tuple of nodeid, successor changesets (0-N length)
514 514 # flag: integer, flag field carrying modifier for the markers (see doc)
515 515 # meta: binary blob, encoded metadata dictionary
516 516 # date: (float, int) tuple, date of marker creation
517 517 # parents: (tuple of nodeid) or None, parents of precursors
518 518 # None is used when no data has been recorded
519 519
520 520 def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
521 521 # caches for various obsolescence related data
522 522 self.caches = {}
523 523 self._all = []
524 524 self.sopener = sopener
525 525 data = sopener.tryread('obsstore')
526 526 self._version = defaultformat
527 527 self._readonly = readonly
528 528 if data:
529 529 self._version, markers = _readmarkers(data)
530 530 self._addmarkers(markers)
531 531
532 532 def __iter__(self):
533 533 return iter(self._all)
534 534
535 535 def __len__(self):
536 536 return len(self._all)
537 537
538 538 def __nonzero__(self):
539 539 return bool(self._all)
540 540
541 541 def create(self, transaction, prec, succs=(), flag=0, parents=None,
542 542 date=None, metadata=None):
543 543 """obsolete: add a new obsolete marker
544 544
545 545 * ensure it is hashable
546 546 * check mandatory metadata
547 547 * encode metadata
548 548 
549 549 If you are a human writing code that creates markers, you want to use
550 550 the `createmarkers` function in this module instead.
551 551 
552 552 return True if a new marker has been added, False if the marker
553 553 already existed (no-op).
554 554 """
555 555 if metadata is None:
556 556 metadata = {}
557 557 if date is None:
558 558 if 'date' in metadata:
559 559 # as a courtesy for out-of-tree extensions
560 560 date = util.parsedate(metadata.pop('date'))
561 561 else:
562 562 date = util.makedate()
563 563 if len(prec) != 20:
564 564 raise ValueError(prec)
565 565 for succ in succs:
566 566 if len(succ) != 20:
567 567 raise ValueError(succ)
568 568 if prec in succs:
569 569 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
570 570
571 571 metadata = tuple(sorted(metadata.iteritems()))
572 572
573 573 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
574 574 return bool(self.add(transaction, [marker]))
575 575
576 576 def add(self, transaction, markers):
577 577 """Add new markers to the store
578 578
579 579 Take care of filtering out duplicates.
580 580 Return the number of new markers."""
581 581 if self._readonly:
582 582 raise util.Abort('creating obsolete markers is not enabled on this '
583 583 'repo')
584 584 known = set(self._all)
585 585 new = []
586 586 for m in markers:
587 587 if m not in known:
588 588 known.add(m)
589 589 new.append(m)
590 590 if new:
591 591 f = self.sopener('obsstore', 'ab')
592 592 try:
593 593 offset = f.tell()
594 594 transaction.add('obsstore', offset)
595 595 # offset == 0: new file - add the version header
596 596 for bytes in encodemarkers(new, offset == 0, self._version):
597 597 f.write(bytes)
598 598 finally:
599 599 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
600 600 # call 'filecacheentry.refresh()' here
601 601 f.close()
602 602 self._addmarkers(new)
603 603 # new markers *may* have changed several sets. invalidate the caches.
604 604 self.caches.clear()
605 605 # records the number of new markers for the transaction hooks
606 606 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
607 607 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
608 608 return len(new)
609 609
610 610 def mergemarkers(self, transaction, data):
611 611 """merge a binary stream of markers inside the obsstore
612 612
613 613 Returns the number of new markers added."""
614 614 version, markers = _readmarkers(data)
615 615 return self.add(transaction, markers)
616 616
617 617 @propertycache
618 618 def successors(self):
619 619 successors = {}
620 620 _addsuccessors(successors, self._all)
621 621 return successors
622 622
623 623 @propertycache
624 624 def precursors(self):
625 625 precursors = {}
626 626 _addprecursors(precursors, self._all)
627 627 return precursors
628 628
629 629 @propertycache
630 630 def children(self):
631 631 children = {}
632 632 _addchildren(children, self._all)
633 633 return children
634 634
635 635 def _cached(self, attr):
636 636 return attr in self.__dict__
637 637
638 638 def _addmarkers(self, markers):
639 639 markers = list(markers) # to allow repeated iteration
640 640 self._all.extend(markers)
641 641 if self._cached('successors'):
642 642 _addsuccessors(self.successors, markers)
643 643 if self._cached('precursors'):
644 644 _addprecursors(self.precursors, markers)
645 645 if self._cached('children'):
646 646 _addchildren(self.children, markers)
647 647 _checkinvalidmarkers(markers)
648 648
649 649 def relevantmarkers(self, nodes):
650 650 """return a set of all obsolescence markers relevant to a set of nodes.
651 651
652 652 "relevant" to a set of nodes means:
653 653 
654 654 - markers that use one of these nodes as a successor
655 655 - prune markers of direct children of these nodes
656 656 - recursive application of the two rules on precursors of these markers
657 657
658 658 It is a set so you cannot rely on order."""
659 659
660 660 pendingnodes = set(nodes)
661 661 seenmarkers = set()
662 662 seennodes = set(pendingnodes)
663 663 precursorsmarkers = self.precursors
664 664 children = self.children
665 665 while pendingnodes:
666 666 direct = set()
667 667 for current in pendingnodes:
668 668 direct.update(precursorsmarkers.get(current, ()))
669 669 pruned = [m for m in children.get(current, ()) if not m[1]]
670 670 direct.update(pruned)
671 671 direct -= seenmarkers
672 672 pendingnodes = set([m[0] for m in direct])
673 673 seenmarkers |= direct
674 674 pendingnodes -= seennodes
675 675 seennodes |= pendingnodes
676 676 return seenmarkers
677 677
678 678 def commonversion(versions):
679 679 """Return the newest version listed in both versions and our local formats.
680 680
681 681 Returns None if no common version exists.
682 682 """
683 683 versions.sort(reverse=True)
684 684 # search for highest version known on both side
685 685 for v in versions:
686 686 if v in formats:
687 687 return v
688 688 return None
689 689
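# Illustration: with the formats table above (versions 0 and 1 supported
# locally), the highest mutually known version wins.
assert commonversion([0, 1, 2]) == 1
assert commonversion([2, 3]) is None
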
690 690 # arbitrarily picked to fit into the 8K limit from HTTP servers
691 691 # you have to take into account:
692 692 # - the version header
693 693 # - the base85 encoding
694 694 _maxpayload = 5300
695 695
696 696 def _pushkeyescape(markers):
697 697 """encode markers into a dict suitable for pushkey exchange
698 698
699 699 - binary data is base85 encoded
700 700 - split in chunks smaller than 5300 bytes"""
701 701 keys = {}
702 702 parts = []
703 703 currentlen = _maxpayload * 2 # ensure we create a new part
704 704 for marker in markers:
705 705 nextdata = _fm0encodeonemarker(marker)
706 706 if (len(nextdata) + currentlen > _maxpayload):
707 707 currentpart = []
708 708 currentlen = 0
709 709 parts.append(currentpart)
710 710 currentpart.append(nextdata)
711 711 currentlen += len(nextdata)
712 712 for idx, part in enumerate(reversed(parts)):
713 713 data = ''.join([_pack('>B', _fm0version)] + part)
714 714 keys['dump%i' % idx] = base85.b85encode(data)
715 715 return keys
716 716
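# A usage sketch for the chunking above ('markers' stands for a
# hypothetical iterable of version-0 marker tuples): every value is base85
# text whose decoded form starts with the version-0 header byte.
#
#   keys = _pushkeyescape(markers)
#   for name in sorted(keys):
#       assert name.startswith('dump')
#       assert base85.b85decode(keys[name])[0] == '\x00'  # _fm0version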
717 717 def listmarkers(repo):
718 718 """List markers over pushkey"""
719 719 if not repo.obsstore:
720 720 return {}
721 return _pushkeyescape(repo.obsstore)
721 return _pushkeyescape(sorted(repo.obsstore))
722 722
723 723 def pushmarker(repo, key, old, new):
724 724 """Push markers over pushkey"""
725 725 if not key.startswith('dump'):
726 726 repo.ui.warn(_('unknown key: %r') % key)
727 727 return 0
728 728 if old:
729 729 repo.ui.warn(_('unexpected old value for %r') % key)
730 730 return 0
731 731 data = base85.b85decode(new)
732 732 lock = repo.lock()
733 733 try:
734 734 tr = repo.transaction('pushkey: obsolete markers')
735 735 try:
736 736 repo.obsstore.mergemarkers(tr, data)
737 737 tr.close()
738 738 return 1
739 739 finally:
740 740 tr.release()
741 741 finally:
742 742 lock.release()
743 743
744 744 def getmarkers(repo, nodes=None):
745 745 """returns markers known in a repository
746 746
747 747 If <nodes> is specified, only markers "relevant" to those nodes are
748 748 returned"""
749 749 if nodes is None:
750 750 rawmarkers = repo.obsstore
751 751 else:
752 752 rawmarkers = repo.obsstore.relevantmarkers(nodes)
753 753
754 754 for markerdata in rawmarkers:
755 755 yield marker(repo, markerdata)
756 756
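# A usage sketch ('repo' stands for a local repository object): walk the
# markers through the wrapper API above.
#
#   for m in getmarkers(repo):
#       print node.hex(m.precnode()), [node.hex(s) for s in m.succnodes()]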
757 757 def relevantmarkers(repo, node):
758 758 """all obsolete markers relevant to some revision"""
759 759 for markerdata in repo.obsstore.relevantmarkers(node):
760 760 yield marker(repo, markerdata)
761 761
762 762
763 763 def precursormarkers(ctx):
764 764 """obsolete marker marking this changeset as a successor"""
765 765 for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
766 766 yield marker(ctx.repo(), data)
767 767
768 768 def successormarkers(ctx):
769 769 """obsolete marker making this changeset obsolete"""
770 770 for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
771 771 yield marker(ctx.repo(), data)
772 772
773 773 def allsuccessors(obsstore, nodes, ignoreflags=0):
774 774 """Yield a node for every successor of <nodes>.
775 775
776 776 Some successors may be unknown locally.
777 777
778 778 This is a linear yield unsuited to detecting split changesets. It includes
779 779 initial nodes too."""
780 780 remaining = set(nodes)
781 781 seen = set(remaining)
782 782 while remaining:
783 783 current = remaining.pop()
784 784 yield current
785 785 for mark in obsstore.successors.get(current, ()):
786 786 # ignore marker flagged with specified flag
787 787 if mark[2] & ignoreflags:
788 788 continue
789 789 for suc in mark[1]:
790 790 if suc not in seen:
791 791 seen.add(suc)
792 792 remaining.add(suc)
793 793
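# A self-contained illustration of the traversal above, substituting a
# stub for the obsstore (single-letter "nodes" and fm0-style marker tuples
# are hypothetical): A was rewritten as B, then B as C.
class _stubstore(object):
    successors = {'A': [('A', ('B',), 0, (), (0.0, 0), None)],
                  'B': [('B', ('C',), 0, (), (0.0, 0), None)]}
assert set(allsuccessors(_stubstore(), ['A'])) == set(['A', 'B', 'C'])
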
794 794 def allprecursors(obsstore, nodes, ignoreflags=0):
795 795 """Yield a node for every precursor of <nodes>.
796 796
797 797 Some precursors may be unknown locally.
798 798
799 799 This is a linear yield unsuited to detecting folded changesets. It includes
800 800 initial nodes too."""
801 801
802 802 remaining = set(nodes)
803 803 seen = set(remaining)
804 804 while remaining:
805 805 current = remaining.pop()
806 806 yield current
807 807 for mark in obsstore.precursors.get(current, ()):
808 808 # ignore marker flagged with specified flag
809 809 if mark[2] & ignoreflags:
810 810 continue
811 811 suc = mark[0]
812 812 if suc not in seen:
813 813 seen.add(suc)
814 814 remaining.add(suc)
815 815
816 816 def foreground(repo, nodes):
817 817 """return all nodes in the "foreground" of other nodes
818 818
819 819 The foreground of a revision is anything reachable using parent -> children
820 820 or precursor -> successor relations. It is very similar to "descendant" but
821 821 augmented with obsolescence information.
822 822
823 823 Beware that obsolescence cycles may result in complex situations.
824 824 """
825 825 repo = repo.unfiltered()
826 826 foreground = set(repo.set('%ln::', nodes))
827 827 if repo.obsstore:
828 828 # We only need this complicated logic if there is obsolescence
829 829 # XXX will probably deserve an optimised revset.
830 830 nm = repo.changelog.nodemap
831 831 plen = -1
832 832 # compute the whole set of successors or descendants
833 833 while len(foreground) != plen:
834 834 plen = len(foreground)
835 835 succs = set(c.node() for c in foreground)
836 836 mutable = [c.node() for c in foreground if c.mutable()]
837 837 succs.update(allsuccessors(repo.obsstore, mutable))
838 838 known = (n for n in succs if n in nm)
839 839 foreground = set(repo.set('%ln::', known))
840 840 return set(c.node() for c in foreground)
841 841
842 842
843 843 def successorssets(repo, initialnode, cache=None):
844 844 """Return all successors sets of the initial node
845 845
846 846 The successors set of a changeset A is a group of revisions that succeed
847 847 A. It succeeds A as a consistent whole, each revision being only a partial
848 848 replacement. The successors set contains non-obsolete changesets only.
849 849
850 850 This function returns the full list of successor sets which is why it
851 851 returns a list of tuples and not just a single tuple. Each tuple is a valid
852 852 successors set. Note that (A,) may be a valid successors set for changeset A
853 853 (see below).
854 854
855 855 In most cases, a changeset A will have a single element (e.g. the changeset
856 856 A is replaced by A') in its successors set. Though, it is also common for a
857 857 changeset A to have no elements in its successor set (e.g. the changeset
858 858 has been pruned). Therefore, the returned list of successors sets will be
859 859 [(A',)] or [], respectively.
860 860
861 861 When a changeset A is split into A' and B', however, it will result in a
862 862 successors set containing more than a single element, i.e. [(A',B')].
863 863 Divergent changesets will result in multiple successors sets, i.e. [(A',),
864 864 (A'',)].
865 865
866 866 If a changeset A is not obsolete, then it will conceptually have no
867 867 successors set. To distinguish this from a pruned changeset, the successors
868 868 set will only contain itself, i.e. [(A,)].
869 869
870 870 Finally, successors unknown locally are considered to be pruned (obsoleted
871 871 without any successors).
872 872
873 873 The optional `cache` parameter is a dictionary that may contain precomputed
874 874 successors sets. It is meant to reuse the computation of a previous call to
875 875 `successorssets` when multiple calls are made at the same time. The cache
876 876 dictionary is updated in place. The caller is responsible for its life
877 877 span. Code that makes multiple calls to `successorssets` *must* use this
878 878 cache mechanism or suffer terrible performance.
879 879
880 880 """
881 881
882 882 succmarkers = repo.obsstore.successors
883 883
884 884 # Stack of nodes we search successors sets for
885 885 toproceed = [initialnode]
886 886 # set version of above list for fast loop detection
887 887 # element added to "toproceed" must be added here
888 888 stackedset = set(toproceed)
889 889 if cache is None:
890 890 cache = {}
891 891
892 892 # This while loop is the flattened version of a recursive search for
893 893 # successors sets
894 894 #
895 895 # def successorssets(x):
896 896 # successors = directsuccessors(x)
897 897 # ss = [[]]
898 898 # for succ in directsuccessors(x):
899 899 # # product as in itertools cartesian product
900 900 # ss = product(ss, successorssets(succ))
901 901 # return ss
902 902 #
903 903 # But we cannot use plain recursive calls here:
904 904 # - that would blow the python call stack
905 905 # - obsolescence markers may have cycles, we need to handle them.
906 906 #
907 907 # The `toproceed` list acts as our call stack. Every node we search
908 908 # successors sets for is stacked there.
909 909 #
910 910 # The `stackedset` is a set version of this stack, used to check if a node
911 911 # is already stacked. This check is used to detect cycles and prevent
912 912 # infinite loops.
913 913 #
914 914 # successors sets of all nodes are stored in the `cache` dictionary.
915 915 #
916 916 # After this while loop ends we use the cache to return the successors sets
917 917 # for the node requested by the caller.
918 918 while toproceed:
919 919 # Every iteration tries to compute the successors sets of the topmost
920 920 # node of the stack: CURRENT.
921 921 #
922 922 # There are four possible outcomes:
923 923 #
924 924 # 1) We already know the successors sets of CURRENT:
925 925 # -> mission accomplished, pop it from the stack.
926 926 # 2) Node is not obsolete:
927 927 # -> the node is its own successors sets. Add it to the cache.
928 928 # 3) We do not know successors set of direct successors of CURRENT:
929 929 # -> We add those successors to the stack.
930 930 # 4) We know successors sets of all direct successors of CURRENT:
931 931 # -> We can compute CURRENT successors set and add it to the
932 932 # cache.
933 933 #
934 934 current = toproceed[-1]
935 935 if current in cache:
936 936 # case (1): We already know the successors sets
937 937 stackedset.remove(toproceed.pop())
938 938 elif current not in succmarkers:
939 939 # case (2): The node is not obsolete.
940 940 if current in repo:
941 941 # We have a valid last successor.
942 942 cache[current] = [(current,)]
943 943 else:
944 944 # Final obsolete version is unknown locally.
945 945 # Do not count that as a valid successor.
946 946 cache[current] = []
947 947 else:
948 948 # cases (3) and (4)
949 949 #
950 950 # We proceed in two phases. Phase 1 aims to distinguish case (3)
951 951 # from case (4):
952 952 #
953 953 # For each direct successor of CURRENT, we check whether its
954 954 # successors sets are known. If they are not, we stack the
955 955 # unknown node and proceed to the next iteration of the while
956 956 # loop. (case 3)
957 957 #
958 958 # During this step, we may detect obsolescence cycles: a node
959 959 # with unknown successors sets but already in the call stack.
960 960 # In such a situation, we arbitrarily set the successors sets of
961 961 # the node to nothing (node pruned) to break the cycle.
962 962 #
963 963 # If no break was encountered we proceed to phase 2.
964 964 #
965 965 # Phase 2 computes successors sets of CURRENT (case 4); see details
966 966 # in phase 2 itself.
967 967 #
968 968 # Note the two levels of iteration in each phase.
969 969 # - The first one handles obsolescence markers using CURRENT as
970 970 # precursor (successor markers of CURRENT).
971 971 #
972 972 # Having multiple entries here means divergence.
973 973 #
974 974 # - The second one handles successors defined in each marker.
975 975 #
976 976 # Having none means a pruned node, multiple successors means a split,
977 977 # a single successor is a standard replacement.
978 978 #
979 979 for mark in sorted(succmarkers[current]):
980 980 for suc in mark[1]:
981 981 if suc not in cache:
982 982 if suc in stackedset:
983 983 # cycle breaking
984 984 cache[suc] = []
985 985 else:
986 986 # case (3) If we have not computed successors sets
987 987 # of one of those successors we add it to the
988 988 # `toproceed` stack and stop all work for this
989 989 # iteration.
990 990 toproceed.append(suc)
991 991 stackedset.add(suc)
992 992 break
993 993 else:
994 994 continue
995 995 break
996 996 else:
997 997 # case (4): we know all successors sets of all direct
998 998 # successors
999 999 #
1000 1000 # Successors set contributed by each marker depends on the
1001 1001 # successors sets of all its "successors" nodes.
1002 1002 #
1003 1003 # Each different marker is a divergence in the obsolescence
1004 1004 # history. It contributes successors sets distinct from other
1005 1005 # markers.
1006 1006 #
1007 1007 # Within a marker, a successor may have divergent successors
1008 1008 # sets. In such a case, the marker will contribute multiple
1009 1009 # divergent successors sets. If multiple successors have
1010 1010 # divergent successors sets, a Cartesian product is used.
1011 1011 #
1012 1012 # At the end we post-process successors sets to remove
1013 1013 # duplicated entries and successors sets that are strict subsets of
1014 1014 # other ones.
1015 1015 succssets = []
1016 1016 for mark in sorted(succmarkers[current]):
1017 1017 # successors sets contributed by this marker
1018 1018 markss = [[]]
1019 1019 for suc in mark[1]:
1020 1020 # Cartesian product with previous successors
1021 1021 productresult = []
1022 1022 for prefix in markss:
1023 1023 for suffix in cache[suc]:
1024 1024 newss = list(prefix)
1025 1025 for part in suffix:
1026 1026 # do not duplicate entries in successors set
1027 1027 # first entry wins.
1028 1028 if part not in newss:
1029 1029 newss.append(part)
1030 1030 productresult.append(newss)
1031 1031 markss = productresult
1032 1032 succssets.extend(markss)
1033 1033 # remove duplicated and subset
1034 1034 seen = []
1035 1035 final = []
1036 1036 candidate = sorted(((set(s), s) for s in succssets if s),
1037 1037 key=lambda x: len(x[1]), reverse=True)
1038 1038 for setversion, listversion in candidate:
1039 1039 for seenset in seen:
1040 1040 if setversion.issubset(seenset):
1041 1041 break
1042 1042 else:
1043 1043 final.append(listversion)
1044 1044 seen.append(setversion)
1045 1045 final.reverse() # put small successors sets first
1046 1046 cache[current] = final
1047 1047 return cache[initialnode]
1048 1048
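# A usage sketch ('repo' and 'nodes' assumed to exist): share one cache
# dict across calls, as the docstring above requires for decent
# performance.
#
#   cache = {}
#   for n in nodes:
#       ss = successorssets(repo, n, cache)
#       if ss == [(n,)]:
#           pass  # n is not obsolete
#       elif not ss:
#           pass  # n was pruned, or its successors are unknown locally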
1049 1049 def _knownrevs(repo, nodes):
1050 1050 """yield revision numbers of known nodes passed in parameters
1051 1051
1052 1052 Unknown revisions are silently ignored."""
1053 1053 torev = repo.changelog.nodemap.get
1054 1054 for n in nodes:
1055 1055 rev = torev(n)
1056 1056 if rev is not None:
1057 1057 yield rev
1058 1058
1059 1059 # mapping of 'set-name' -> <function to compute this set>
1060 1060 cachefuncs = {}
1061 1061 def cachefor(name):
1062 1062 """Decorator to register a function as computing the cache for a set"""
1063 1063 def decorator(func):
1064 1064 assert name not in cachefuncs
1065 1065 cachefuncs[name] = func
1066 1066 return func
1067 1067 return decorator
1068 1068
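# A sketch of registering an extra cached set through the decorator above
# (the 'pruned' name and its body are hypothetical):
#
#   @cachefor('pruned')
#   def _computeprunedset(repo):
#       getrev = repo.changelog.nodemap.get
#       pruned = set()
#       for n, marks in repo.obsstore.successors.iteritems():
#           if any(not m[1] for m in marks):  # a marker with no successor
#               rev = getrev(n)
#               if rev is not None:
#                   pruned.add(rev)
#       return pruned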
1069 1069 def getrevs(repo, name):
1070 1070 """Return the set of revisions that belong to the <name> set
1071 1071
1072 1072 Such access may compute the set and cache it for future use"""
1073 1073 repo = repo.unfiltered()
1074 1074 if not repo.obsstore:
1075 1075 return frozenset()
1076 1076 if name not in repo.obsstore.caches:
1077 1077 repo.obsstore.caches[name] = cachefuncs[name](repo)
1078 1078 return repo.obsstore.caches[name]
1079 1079
1080 1080 # To be simple we need to invalidate obsolescence caches when:
1081 1081 #
1082 1082 # - a new changeset is added
1083 1083 # - the public phase is changed
1084 1084 # - obsolescence markers are added
1085 1085 # - strip is used on a repo
1086 1086 def clearobscaches(repo):
1087 1087 """Remove all obsolescence related caches from a repo
1088 1088 
1089 1089 This removes all caches in the obsstore if the obsstore already exists
1090 1090 on the repo.
1091 1091 
1092 1092 (We could be smarter here given the exact event that triggered the cache
1093 1093 clearing)"""
1094 1094 # only clear the caches if there is obsstore data in this repo
1095 1095 if 'obsstore' in repo._filecache:
1096 1096 repo.obsstore.caches.clear()
1097 1097
1098 1098 @cachefor('obsolete')
1099 1099 def _computeobsoleteset(repo):
1100 1100 """the set of obsolete revisions"""
1101 1101 obs = set()
1102 1102 getrev = repo.changelog.nodemap.get
1103 1103 getphase = repo._phasecache.phase
1104 1104 for n in repo.obsstore.successors:
1105 1105 rev = getrev(n)
1106 1106 if rev is not None and getphase(repo, rev):
1107 1107 obs.add(rev)
1108 1108 return obs
1109 1109
1110 1110 @cachefor('unstable')
1111 1111 def _computeunstableset(repo):
1112 1112 """the set of non obsolete revisions with obsolete parents"""
1113 1113 revs = [(ctx.rev(), ctx) for ctx in
1114 1114 repo.set('(not public()) and (not obsolete())')]
1115 1115 revs.sort(key=lambda x:x[0])
1116 1116 unstable = set()
1117 1117 for rev, ctx in revs:
1118 1118 # A rev is unstable if one of its parent is obsolete or unstable
1119 1119 # this works since we traverse following growing rev order
1120 1120 if util.any((x.obsolete() or (x.rev() in unstable))
1121 1121 for x in ctx.parents()):
1122 1122 unstable.add(rev)
1123 1123 return unstable
1124 1124
1125 1125 @cachefor('suspended')
1126 1126 def _computesuspendedset(repo):
1127 1127 """the set of obsolete parents with non obsolete descendants"""
1128 1128 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
1129 1129 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
1130 1130
1131 1131 @cachefor('extinct')
1132 1132 def _computeextinctset(repo):
1133 1133 """the set of obsolete parents without non obsolete descendants"""
1134 1134 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
1135 1135
1136 1136
1137 1137 @cachefor('bumped')
1138 1138 def _computebumpedset(repo):
1139 1139 """the set of revs trying to obsolete public revisions"""
1140 1140 bumped = set()
1141 1141 # util function (avoid attribute lookup in the loop)
1142 1142 phase = repo._phasecache.phase # would be faster to grab the full list
1143 1143 public = phases.public
1144 1144 cl = repo.changelog
1145 1145 torev = cl.nodemap.get
1146 1146 for ctx in repo.set('(not public()) and (not obsolete())'):
1147 1147 rev = ctx.rev()
1148 1148 # We only evaluate mutable, non-obsolete revisions
1149 1149 node = ctx.node()
1150 1150 # (future) A cache of precursors may be worth it if splits are very common
1151 1151 for pnode in allprecursors(repo.obsstore, [node],
1152 1152 ignoreflags=bumpedfix):
1153 1153 prev = torev(pnode) # unfiltered! but so is phasecache
1154 1154 if (prev is not None) and (phase(repo, prev) <= public):
1155 1155 # we have a public precursor
1156 1156 bumped.add(rev)
1157 1157 break # Next draft!
1158 1158 return bumped
1159 1159
1160 1160 @cachefor('divergent')
1161 1161 def _computedivergentset(repo):
1162 1162 """the set of revs that compete to be the final successor of some revision.
1163 1163 """
1164 1164 divergent = set()
1165 1165 obsstore = repo.obsstore
1166 1166 newermap = {}
1167 1167 for ctx in repo.set('(not public()) - obsolete()'):
1168 1168 mark = obsstore.precursors.get(ctx.node(), ())
1169 1169 toprocess = set(mark)
1170 1170 seen = set()
1171 1171 while toprocess:
1172 1172 prec = toprocess.pop()[0]
1173 1173 if prec in seen:
1174 1174 continue # emergency cycle hanging prevention
1175 1175 seen.add(prec)
1176 1176 if prec not in newermap:
1177 1177 successorssets(repo, prec, newermap)
1178 1178 newer = [n for n in newermap[prec] if n]
1179 1179 if len(newer) > 1:
1180 1180 divergent.add(ctx.rev())
1181 1181 break
1182 1182 toprocess.update(obsstore.precursors.get(prec, ()))
1183 1183 return divergent
1184 1184
1185 1185
1186 1186 def createmarkers(repo, relations, flag=0, date=None, metadata=None):
1187 1187 """Add obsolete markers between changesets in a repo
1188 1188
1189 1189 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1190 1190 tuples. `old` and `new` are changectxs. metadata is an optional dictionary
1191 1191 containing metadata for this marker only. It is merged with the global
1192 1192 metadata specified through the `metadata` argument of this function.
1193 1193
1194 1194 Trying to obsolete a public changeset will raise an exception.
1195 1195
1196 1196 The current user and date are used unless specified otherwise in the
1197 1197 metadata attribute.
1198 1198
1199 1199 This function operates within a transaction of its own, but does
1200 1200 not take any lock on the repo.
1201 1201 """
1202 1202 # prepare metadata
1203 1203 if metadata is None:
1204 1204 metadata = {}
1205 1205 if 'user' not in metadata:
1206 1206 metadata['user'] = repo.ui.username()
1207 1207 tr = repo.transaction('add-obsolescence-marker')
1208 1208 try:
1209 1209 for rel in relations:
1210 1210 prec = rel[0]
1211 1211 sucs = rel[1]
1212 1212 localmetadata = metadata.copy()
1213 1213 if 2 < len(rel):
1214 1214 localmetadata.update(rel[2])
1215 1215
1216 1216 if not prec.mutable():
1217 1217 raise util.Abort("cannot obsolete immutable changeset: %s"
1218 1218 % prec)
1219 1219 nprec = prec.node()
1220 1220 nsucs = tuple(s.node() for s in sucs)
1221 1221 npare = None
1222 1222 if not nsucs:
1223 1223 npare = tuple(p.node() for p in prec.parents())
1224 1224 if nprec in nsucs:
1225 1225 raise util.Abort("changeset %s cannot obsolete itself" % prec)
1226 1226 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1227 1227 date=date, metadata=localmetadata)
1228 1228 repo.filteredrevcache.clear()
1229 1229 tr.close()
1230 1230 finally:
1231 1231 tr.release()
1232 1232
1233 1233 def isenabled(repo, option):
1234 1234 """Returns True if the given repository has the given obsolete option
1235 1235 enabled.
1236 1236 """
1237 1237 result = set(repo.ui.configlist('experimental', 'evolution'))
1238 1238 if 'all' in result:
1239 1239 return True
1240 1240
1241 1241 # For migration purposes, temporarily return true if the config hasn't been
1242 1242 # set but _enabled is true.
1243 1243 if len(result) == 0 and _enabled:
1244 1244 return True
1245 1245
1246 1246 # createmarkers must be enabled if other options are enabled
1247 1247 if ((allowunstableopt in result or exchangeopt in result) and
1248 1248 createmarkersopt not in result):
1249 1249 raise util.Abort(_("'createmarkers' obsolete option must be enabled "
1250 1250 "if other obsolete options are enabled"))
1251 1251
1252 1252 return option in result
@@ -1,888 +1,888 b''
1 1 $ cat >> $HGRCPATH << EOF
2 2 > [phases]
3 3 > # public changeset are not obsolete
4 4 > publish=false
5 5 > [ui]
6 6 > logtemplate="{rev}:{node|short} ({phase}) [{tags} {bookmarks}] {desc|firstline}\n"
7 7 > EOF
8 8 $ mkcommit() {
9 9 > echo "$1" > "$1"
10 10 > hg add "$1"
11 11 > hg ci -m "add $1"
12 12 > }
13 13 $ getid() {
14 14 > hg log -T "{node}\n" --hidden -r "desc('$1')"
15 15 > }
16 16
17 17 $ cat > debugkeys.py <<EOF
18 18 > def reposetup(ui, repo):
19 19 > class debugkeysrepo(repo.__class__):
20 20 > def listkeys(self, namespace):
21 21 > ui.write('listkeys %s\n' % (namespace,))
22 22 > return super(debugkeysrepo, self).listkeys(namespace)
23 23 >
24 24 > if repo.local():
25 25 > repo.__class__ = debugkeysrepo
26 26 > EOF
27 27
28 28 $ hg init tmpa
29 29 $ cd tmpa
30 30 $ mkcommit kill_me
31 31
32 32 Checking that the feature is properly disabled
33 33
34 34 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
35 35 abort: creating obsolete markers is not enabled on this repo
36 36 [255]
37 37
38 38 Enabling it
39 39
40 40 $ cat >> $HGRCPATH << EOF
41 41 > [experimental]
42 42 > evolution=createmarkers,exchange
43 43 > EOF
44 44
45 45 Killing a single changeset without replacement
46 46
47 47 $ hg debugobsolete 0
48 48 abort: changeset references must be full hexadecimal node identifiers
49 49 [255]
50 50 $ hg debugobsolete '00'
51 51 abort: changeset references must be full hexadecimal node identifiers
52 52 [255]
53 53 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
54 54 $ hg debugobsolete
55 55 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'babar'}
56 56
57 57 (test that mercurial is not confused)
58 58
59 59 $ hg up null --quiet # having 0 as parent prevents it from being hidden
60 60 $ hg tip
61 61 -1:000000000000 (public) [tip ]
62 62 $ hg up --hidden tip --quiet
63 63
64 64 Killing a single changeset with itself should fail
65 65 (simple local safeguard)
66 66
67 67 $ hg debugobsolete `getid kill_me` `getid kill_me`
68 68 abort: bad obsmarker input: in-marker cycle with 97b7c2d76b1845ed3eb988cd612611e72406cef0
69 69 [255]
70 70
71 71 $ cd ..
72 72
73 73 Killing a single changeset with replacement
74 74 (and testing the format option)
75 75
76 76 $ hg init tmpb
77 77 $ cd tmpb
78 78 $ mkcommit a
79 79 $ mkcommit b
80 80 $ mkcommit original_c
81 81 $ hg up "desc('b')"
82 82 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
83 83 $ mkcommit new_c
84 84 created new head
85 85 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
86 86 $ hg debugobsolete --config format.obsstore-version=0 --flag 12 `getid original_c` `getid new_c` -d '121 120'
87 87 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
88 88 2:245bde4270cd add original_c
89 89 $ hg debugrevlog -cd
90 90 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
91 91 0 -1 -1 0 59 0 0 0 0 58 58 0 1 0
92 92 1 0 -1 59 118 59 59 0 0 58 116 0 1 0
93 93 2 1 -1 118 193 118 118 59 0 76 192 0 1 0
94 94 3 1 -1 193 260 193 193 59 0 66 258 0 2 0
95 95 $ hg debugobsolete
96 96 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
97 97
98 98 (check for version number of the obsstore)
99 99
100 100 $ dd bs=1 count=1 if=.hg/store/obsstore 2>/dev/null
101 101 \x00 (no-eol) (esc)
102 102
103 103 do it again (it reads the obsstore before adding new changesets)
104 104
105 105 $ hg up '.^'
106 106 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
107 107 $ mkcommit new_2_c
108 108 created new head
109 109 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
110 110 $ hg debugobsolete
111 111 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
112 112 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
113 113
114 114 Register two markers with a missing node
115 115
116 116 $ hg up '.^'
117 117 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
118 118 $ mkcommit new_3_c
119 119 created new head
120 120 $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
121 121 $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
122 122 $ hg debugobsolete
123 123 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
124 124 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
125 125 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
126 126 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
127 127
128 128 Refuse pathological nullid successors
129 129 $ hg debugobsolete -d '9001 0' 1337133713371337133713371337133713371337 0000000000000000000000000000000000000000
130 130 transaction abort!
131 131 rollback completed
132 132 abort: bad obsolescence marker detected: invalid successors nullid
133 133 [255]
134 134
135 135 Check that graphlog detects that a changeset is obsolete:
136 136
137 137 $ hg log -G
138 138 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
139 139 |
140 140 o 1:7c3bad9141dc (draft) [ ] add b
141 141 |
142 142 o 0:1f0dee641bb7 (draft) [ ] add a
143 143
144 144
145 145 check that heads does not report them
146 146
147 147 $ hg heads
148 148 5:5601fb93a350 (draft) [tip ] add new_3_c
149 149 $ hg heads --hidden
150 150 5:5601fb93a350 (draft) [tip ] add new_3_c
151 151 4:ca819180edb9 (draft) [ ] add new_2_c
152 152 3:cdbce2fbb163 (draft) [ ] add new_c
153 153 2:245bde4270cd (draft) [ ] add original_c
154 154
155 155
156 156 check that summary does not report them
157 157
158 158 $ hg init ../sink
159 159 $ echo '[paths]' >> .hg/hgrc
160 160 $ echo 'default=../sink' >> .hg/hgrc
161 161 $ hg summary --remote
162 162 parent: 5:5601fb93a350 tip
163 163 add new_3_c
164 164 branch: default
165 165 commit: (clean)
166 166 update: (current)
167 167 phases: 3 draft (draft)
168 168 remote: 3 outgoing
169 169
170 170 $ hg summary --remote --hidden
171 171 parent: 5:5601fb93a350 tip
172 172 add new_3_c
173 173 branch: default
174 174 commit: (clean)
175 175 update: 3 new changesets, 4 branch heads (merge)
176 176 phases: 6 draft (draft)
177 177 remote: 3 outgoing
178 178
179 179 check that various commands work well with filtering
180 180
181 181 $ hg tip
182 182 5:5601fb93a350 (draft) [tip ] add new_3_c
183 183 $ hg log -r 6
184 184 abort: unknown revision '6'!
185 185 [255]
186 186 $ hg log -r 4
187 187 abort: hidden revision '4'!
188 188 (use --hidden to access hidden revisions)
189 189 [255]
190 190 $ hg debugrevspec 'rev(6)'
191 191 $ hg debugrevspec 'rev(4)'
192 192 $ hg debugrevspec 'null'
193 193 -1
194 194
195 195 Check that public changesets are not accounted as obsolete:
196 196
197 197 $ hg --hidden phase --public 2
198 198 $ hg log -G
199 199 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
200 200 |
201 201 | o 2:245bde4270cd (public) [ ] add original_c
202 202 |/
203 203 o 1:7c3bad9141dc (public) [ ] add b
204 204 |
205 205 o 0:1f0dee641bb7 (public) [ ] add a
206 206
207 207
208 208 And that bumped changesets are detected
209 209 --------------------------------------
210 210
211 211 If we didn't filter obsolete changesets out, 3 and 4 would show up too. Also
212 212 note that the bumped changeset (5:5601fb93a350) is not a direct successor of
213 213 the public changeset
214 214
215 215 $ hg log --hidden -r 'bumped()'
216 216 5:5601fb93a350 (draft) [tip ] add new_3_c
217 217
218 218 And that we can't push a bumped changeset
219 219
220 220 $ hg push ../tmpa -r 0 --force #(make repo related)
221 221 pushing to ../tmpa
222 222 searching for changes
223 223 warning: repository is unrelated
224 224 adding changesets
225 225 adding manifests
226 226 adding file changes
227 227 added 1 changesets with 1 changes to 1 files (+1 heads)
228 228 $ hg push ../tmpa
229 229 pushing to ../tmpa
230 230 searching for changes
231 231 abort: push includes bumped changeset: 5601fb93a350!
232 232 [255]
233 233
234 234 Fixing "bumped" situation
235 235 We need to create a clone of 5 and add a special marker with a flag
236 236
237 237 $ hg up '5^'
238 238 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
239 239 $ hg revert -ar 5
240 240 adding new_3_c
241 241 $ hg ci -m 'add n3w_3_c'
242 242 created new head
243 243 $ hg debugobsolete -d '1338 0' --flags 1 `getid new_3_c` `getid n3w_3_c`
244 244 $ hg log -r 'bumped()'
245 245 $ hg log -G
246 246 @ 6:6f9641995072 (draft) [tip ] add n3w_3_c
247 247 |
248 248 | o 2:245bde4270cd (public) [ ] add original_c
249 249 |/
250 250 o 1:7c3bad9141dc (public) [ ] add b
251 251 |
252 252 o 0:1f0dee641bb7 (public) [ ] add a
253 253
254 254
255 255 $ cd ..
256 256
257 257 Revision 0 is hidden
258 258 --------------------
259 259
260 260 $ hg init rev0hidden
261 261 $ cd rev0hidden
262 262
263 263 $ mkcommit kill0
264 264 $ hg up -q null
265 265 $ hg debugobsolete `getid kill0`
266 266 $ mkcommit a
267 267 $ mkcommit b
268 268
269 269 Should pick the first visible revision as "repo" node
270 270
271 271 $ hg archive ../archive-null
272 272 $ cat ../archive-null/.hg_archival.txt
273 273 repo: 1f0dee641bb7258c56bd60e93edfa2405381c41e
274 274 node: 7c3bad9141dcb46ff89abf5f61856facd56e476c
275 275 branch: default
276 276 latesttag: null
277 277 latesttagdistance: 2
278 278 changessincelatesttag: 2
279 279
280 280
281 281 $ cd ..
282 282
283 283 Exchange Test
284 284 ============================
285 285
286 286 Destination repo does not have any data
287 287 ---------------------------------------
288 288
289 289 Simple incoming test
290 290
291 291 $ hg init tmpc
292 292 $ cd tmpc
293 293 $ hg incoming ../tmpb
294 294 comparing with ../tmpb
295 295 0:1f0dee641bb7 (public) [ ] add a
296 296 1:7c3bad9141dc (public) [ ] add b
297 297 2:245bde4270cd (public) [ ] add original_c
298 298 6:6f9641995072 (draft) [tip ] add n3w_3_c
299 299
300 300 Try to pull markers
301 301 (extinct changesets are excluded but markers are pushed)
302 302
303 303 $ hg pull ../tmpb
304 304 pulling from ../tmpb
305 305 requesting all changes
306 306 adding changesets
307 307 adding manifests
308 308 adding file changes
309 309 added 4 changesets with 4 changes to 4 files (+1 heads)
310 310 (run 'hg heads' to see heads, 'hg merge' to merge)
311 311 $ hg debugobsolete
312 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
312 313 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
313 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
314 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
314 315 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
315 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
316 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
316 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
317 317
318 318 Rollback/Transaction support
319 319
320 320 $ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
321 321 $ hg debugobsolete
322 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
322 323 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
323 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
324 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
324 325 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
325 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
326 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
326 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
327 327 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 (Thu Jan 01 00:22:20 1970 +0000) {'user': 'test'}
328 328 $ hg rollback -n
329 329 repository tip rolled back to revision 3 (undo debugobsolete)
330 330 $ hg rollback
331 331 repository tip rolled back to revision 3 (undo debugobsolete)
332 332 $ hg debugobsolete
333 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
333 334 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
334 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
335 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
335 336 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
336 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
337 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
337 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
338 338
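The dry run and the real rollback behave the same here because debugobsolete records markers inside an ordinary repository transaction; `hg rollback` restores the pre-transaction state of the obsstore from the undo files. A minimal sketch of creating a marker under a transaction from Python, assuming the obsstore.create(transaction, precursor, successors, ...) signature of this era; the node values are illustrative placeholders:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '.')
    lock = repo.lock()
    try:
        tr = repo.transaction('debugobsolete')
        try:
            prec = '\xaa' * 20  # hypothetical 20-byte precursor node
            succ = '\xbb' * 20  # hypothetical 20-byte successor node
            repo.obsstore.create(tr, prec, (succ,))
            tr.close()  # committed; 'hg rollback' can now undo it
        finally:
            tr.release()
    finally:
        lock.release()
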
339 339 $ cd ..
340 340
341 341 Try to push markers
342 342
343 343 $ hg init tmpd
344 344 $ hg -R tmpb push tmpd
345 345 pushing to tmpd
346 346 searching for changes
347 347 adding changesets
348 348 adding manifests
349 349 adding file changes
350 350 added 4 changesets with 4 changes to 4 files (+1 heads)
351 351 $ hg -R tmpd debugobsolete | sort
352 352 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
353 353 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
354 354 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
355 355 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
356 356 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
357 357
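The `| sort` above now matches what the receiving repository actually stores: markers are put into a deterministic, sorted order before being exchanged. A one-liner sketch of the idea, assuming markers are plain tuples whose first element is the precursor node (a sketch, not the exact implementation of this changeset):

    def sortedmarkers(markers):
        # markers compare as plain tuples, so sorting them orders the
        # stream by precursor node first; the receiver stores them in
        # that order
        return sorted(markers)

Sorting before encoding also makes the encoded obsmarker part deterministic across identical pushes.
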
358 358 Check that obsolete keys are exchanged only if the source has an obsolete store
359 359
360 360 $ hg init empty
361 361 $ hg --config extensions.debugkeys=debugkeys.py -R empty push tmpd
362 362 pushing to tmpd
363 363 listkeys phases
364 364 listkeys bookmarks
365 365 no changes found
366 366 listkeys phases
367 367 [1]
368 368
369 369 clone support
370 370 (markers are copied and extinct changesets are included to allow hardlinks)
371 371
372 372 $ hg clone tmpb clone-dest
373 373 updating to branch default
374 374 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
375 375 $ hg -R clone-dest log -G --hidden
376 376 @ 6:6f9641995072 (draft) [tip ] add n3w_3_c
377 377 |
378 378 | x 5:5601fb93a350 (draft) [ ] add new_3_c
379 379 |/
380 380 | x 4:ca819180edb9 (draft) [ ] add new_2_c
381 381 |/
382 382 | x 3:cdbce2fbb163 (draft) [ ] add new_c
383 383 |/
384 384 | o 2:245bde4270cd (public) [ ] add original_c
385 385 |/
386 386 o 1:7c3bad9141dc (public) [ ] add b
387 387 |
388 388 o 0:1f0dee641bb7 (public) [ ] add a
389 389
390 390 $ hg -R clone-dest debugobsolete
391 391 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
392 392 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
393 393 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
394 394 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
395 395 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
396 396
397 397
398 398 Destination repo has existing data
399 399 ---------------------------------------
400 400
401 401 On pull
402 402
403 403 $ hg init tmpe
404 404 $ cd tmpe
405 405 $ hg debugobsolete -d '1339 0' 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00
406 406 $ hg pull ../tmpb
407 407 pulling from ../tmpb
408 408 requesting all changes
409 409 adding changesets
410 410 adding manifests
411 411 adding file changes
412 412 added 4 changesets with 4 changes to 4 files (+1 heads)
413 413 (run 'hg heads' to see heads, 'hg merge' to merge)
414 414 $ hg debugobsolete
415 415 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
416 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
416 417 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
417 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
418 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
418 419 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
419 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
420 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
420 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
421 421
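The pre-existing local marker (1339…) keeps its place at the top of the list: pulled markers are appended to the local obsstore and duplicates are skipped. A rough sketch of that merge, assuming markers are hashable tuples:

    def addmarkers(store, received):
        # append received markers, skipping those already known locally;
        # pre-existing markers keep their position, as the 1339... marker
        # does above (sketch)
        known = set(store)
        new = [m for m in received if m not in known]
        store.extend(new)
        return len(new)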
422 422
423 423 On push
424 424
425 425 $ hg push ../tmpc
426 426 pushing to ../tmpc
427 427 searching for changes
428 428 no changes found
429 429 [1]
430 430 $ hg -R ../tmpc debugobsolete
431 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
431 432 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
432 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
433 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
433 434 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
434 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
435 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
435 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
436 436 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
437 437
438 438 detect outgoing obsolete and unstable
439 439 ---------------------------------------
440 440
441 441
442 442 $ hg log -G
443 443 o 3:6f9641995072 (draft) [tip ] add n3w_3_c
444 444 |
445 445 | o 2:245bde4270cd (public) [ ] add original_c
446 446 |/
447 447 o 1:7c3bad9141dc (public) [ ] add b
448 448 |
449 449 o 0:1f0dee641bb7 (public) [ ] add a
450 450
451 451 $ hg up 'desc("n3w_3_c")'
452 452 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
453 453 $ mkcommit original_d
454 454 $ mkcommit original_e
455 455 $ hg debugobsolete --record-parents `getid original_d` -d '0 0'
456 456 $ hg debugobsolete | grep `getid original_d`
457 457 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
458 458 $ hg log -r 'obsolete()'
459 459 4:94b33453f93b (draft) [ ] add original_d
460 460 $ hg log -G -r '::unstable()'
461 461 @ 5:cda648ca50f5 (draft) [tip ] add original_e
462 462 |
463 463 x 4:94b33453f93b (draft) [ ] add original_d
464 464 |
465 465 o 3:6f9641995072 (draft) [ ] add n3w_3_c
466 466 |
467 467 o 1:7c3bad9141dc (public) [ ] add b
468 468 |
469 469 o 0:1f0dee641bb7 (public) [ ] add a
470 470
471 471
472 472 refuse to push obsolete changeset
473 473
474 474 $ hg push ../tmpc/ -r 'desc("original_d")'
475 475 pushing to ../tmpc/
476 476 searching for changes
477 477 abort: push includes obsolete changeset: 94b33453f93b!
478 478 [255]
479 479
480 480 refuse to push unstable changeset
481 481
482 482 $ hg push ../tmpc/
483 483 pushing to ../tmpc/
484 484 searching for changes
485 485 abort: push includes unstable changeset: cda648ca50f5!
486 486 [255]
487 487
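Both aborts come from the same pre-push validation: the outgoing changesets are scanned before anything is sent, and the push is refused if any of them is obsolete or unstable. A hedged sketch of such a check using revsets (the real check lives elsewhere in core and may differ in detail):

    from mercurial import util

    def checkpush(repo, outgoingnodes):
        # refuse to propagate troubled changesets; outgoingnodes is a
        # list of binary node ids about to be pushed (sketch)
        for rev in repo.revs('%ln and obsolete()', outgoingnodes):
            raise util.Abort('push includes obsolete changeset: %s!'
                             % repo[rev])
        for rev in repo.revs('%ln and unstable()', outgoingnodes):
            raise util.Abort('push includes unstable changeset: %s!'
                             % repo[rev])
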
488 488 Test that extinct changesets are properly detected
489 489
490 490 $ hg log -r 'extinct()'
491 491
492 492 Don't try to push extinct changesets
493 493
494 494 $ hg init ../tmpf
495 495 $ hg out ../tmpf
496 496 comparing with ../tmpf
497 497 searching for changes
498 498 0:1f0dee641bb7 (public) [ ] add a
499 499 1:7c3bad9141dc (public) [ ] add b
500 500 2:245bde4270cd (public) [ ] add original_c
501 501 3:6f9641995072 (draft) [ ] add n3w_3_c
502 502 4:94b33453f93b (draft) [ ] add original_d
503 503 5:cda648ca50f5 (draft) [tip ] add original_e
504 504 $ hg push ../tmpf -f # -f because we push unstable too
505 505 pushing to ../tmpf
506 506 searching for changes
507 507 adding changesets
508 508 adding manifests
509 509 adding file changes
510 510 added 6 changesets with 6 changes to 6 files (+1 heads)
511 511
512 512 no warning displayed
513 513
514 514 $ hg push ../tmpf
515 515 pushing to ../tmpf
516 516 searching for changes
517 517 no changes found
518 518 [1]
519 519
520 520 Do not warn about new head when the new head is a successor of a remote one
521 521
522 522 $ hg log -G
523 523 @ 5:cda648ca50f5 (draft) [tip ] add original_e
524 524 |
525 525 x 4:94b33453f93b (draft) [ ] add original_d
526 526 |
527 527 o 3:6f9641995072 (draft) [ ] add n3w_3_c
528 528 |
529 529 | o 2:245bde4270cd (public) [ ] add original_c
530 530 |/
531 531 o 1:7c3bad9141dc (public) [ ] add b
532 532 |
533 533 o 0:1f0dee641bb7 (public) [ ] add a
534 534
535 535 $ hg up -q 'desc(n3w_3_c)'
536 536 $ mkcommit obsolete_e
537 537 created new head
538 538 $ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'`
539 539 $ hg outgoing ../tmpf # incidental hg outgoing testing
540 540 comparing with ../tmpf
541 541 searching for changes
542 542 6:3de5eca88c00 (draft) [tip ] add obsolete_e
543 543 $ hg push ../tmpf
544 544 pushing to ../tmpf
545 545 searching for changes
546 546 adding changesets
547 547 adding manifests
548 548 adding file changes
549 549 added 1 changesets with 1 changes to 1 files (+1 heads)
550 550
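The push is accepted without --force because discovery counts a pushed head that succeeds a remote head as a replacement rather than an addition. A simplified sketch of that rule; the successor data is assumed to be precomputed into a plain dict here, while real discovery computes successor sets lazily:

    def pushaddsnewheads(pushedheads, remoteheads, successors):
        # 'successors' maps an old node to the set of its latest
        # successor nodes (simplified sketch)
        pushed = set(pushedheads)
        replaced = set(h for h in remoteheads
                       if successors.get(h, set()) & pushed)
        newheads = pushed - set(remoteheads)
        # only warn/abort when more heads appear than old heads disappear
        return len(newheads) > len(replaced)
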
551 551 test relevance computation
552 552 ---------------------------------------
553 553
554 554 Checking a simple case of "marker relevance".
555 555
556 556
557 557 Reminder of the repo situation
558 558
559 559 $ hg log --hidden --graph
560 560 @ 6:3de5eca88c00 (draft) [tip ] add obsolete_e
561 561 |
562 562 | x 5:cda648ca50f5 (draft) [ ] add original_e
563 563 | |
564 564 | x 4:94b33453f93b (draft) [ ] add original_d
565 565 |/
566 566 o 3:6f9641995072 (draft) [ ] add n3w_3_c
567 567 |
568 568 | o 2:245bde4270cd (public) [ ] add original_c
569 569 |/
570 570 o 1:7c3bad9141dc (public) [ ] add b
571 571 |
572 572 o 0:1f0dee641bb7 (public) [ ] add a
573 573
574 574
575 575 List of all markers
576 576
577 577 $ hg debugobsolete
578 578 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
579 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
579 580 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
580 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
581 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
581 582 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
582 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
583 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
583 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
584 584 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
585 585 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
586 586
587 587 List of changesets with no chain
588 588
589 589 $ hg debugobsolete --hidden --rev ::2
590 590
591 591 List of changesets that are included in a marker chain
592 592
593 593 $ hg debugobsolete --hidden --rev 6
594 594 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
595 595
596 596 List of changesets with a longer chain (including a pruned child)
597 597
598 598 $ hg debugobsolete --hidden --rev 3
599 599 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
600 600 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
601 601 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
602 602 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
603 603 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
604 604 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
605 605 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
606 606
607 607 List of both
608 608
609 609 $ hg debugobsolete --hidden --rev 3::6
610 610 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
611 611 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
612 612 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
613 613 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
614 614 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
615 615 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
616 616 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
617 617 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
618 618
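The three listings follow from the definition of marker relevance: a marker is relevant to a node if the node is one of its successors, if the node is the parent of a pruned child, or recursively through the precursors of already-relevant markers. A rough sketch of that closure, assuming markers as (precursor, successors, parents) tuples where 'parents' is only meaningful on prune markers (real markers also carry flags, dates and metadata):

    def relevantmarkers(markers, nodes):
        bysucc = {}
        byparent = {}
        for m in markers:
            prec, succs, parents = m
            for s in succs:
                bysucc.setdefault(s, []).append(m)
            if not succs:  # prune marker: indexed by the pruned child's parents
                for p in parents or ():
                    byparent.setdefault(p, []).append(m)
        relevant = set()
        seen = set(nodes)
        pending = list(nodes)
        while pending:
            node = pending.pop()
            for m in bysucc.get(node, []) + byparent.get(node, []):
                if m not in relevant:
                    relevant.add(m)
                    if m[0] not in seen:  # walk on to the precursor
                        seen.add(m[0])
                        pending.append(m[0])
        return relevant

Querying rev 3 (6f9641995072) this way picks up its whole precursor chain plus the prune marker of its child, matching the listing above.
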
619 619 #if serve
620 620
621 621 Test the debug output for exchange
622 622 ----------------------------------
623 623
624 624 $ hg pull ../tmpb --config 'experimental.obsmarkers-exchange-debug=True' --config 'experimental.bundle2-exp=True'
625 625 pulling from ../tmpb
626 626 searching for changes
627 627 no changes found
628 628 obsmarker-exchange: 346 bytes received
629 629
630 630 check hgweb does not explode
631 631 ====================================
632 632
633 633 $ hg unbundle $TESTDIR/bundles/hgweb+obs.hg
634 634 adding changesets
635 635 adding manifests
636 636 adding file changes
637 637 added 62 changesets with 63 changes to 9 files (+60 heads)
638 638 (run 'hg heads .' to see heads, 'hg merge' to merge)
639 639 $ for node in `hg log -r 'desc(babar_)' --template '{node}\n'`;
640 640 > do
641 641 > hg debugobsolete $node
642 642 > done
643 643 $ hg up tip
644 644 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
645 645
646 646 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
647 647 $ cat hg.pid >> $DAEMON_PIDS
648 648
649 649 check changelog view
650 650
651 651 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'shortlog/'
652 652 200 Script output follows
653 653
654 654 check graph view
655 655
656 656 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'graph'
657 657 200 Script output follows
658 658
659 659 check filelog view
660 660
661 661 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'log/'`hg log -r . -T "{node}"`/'babar'
662 662 200 Script output follows
663 663
664 664 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/68'
665 665 200 Script output follows
666 666 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/67'
667 667 404 Not Found
668 668 [1]
669 669
670 670 check the web.view config option:
671 671
672 672 $ "$TESTDIR/killdaemons.py" hg.pid
673 673 $ cat >> .hg/hgrc << EOF
674 674 > [web]
675 675 > view=all
676 676 > EOF
677 677 $ wait
678 678 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
679 679 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/67'
680 680 200 Script output follows
681 681 $ "$TESTDIR/killdaemons.py" hg.pid
682 682
683 683 Checking the _enabled=False warning when obsolete markers exist
684 684
685 685 $ echo '[experimental]' >> $HGRCPATH
686 686 $ echo "evolution=" >> $HGRCPATH
687 687 $ hg log -r tip
688 688 obsolete feature not enabled but 68 markers found!
689 689 68:c15e9edfca13 (draft) [tip ] add celestine
690 690
691 691 re-enable for later tests
692 692
693 693 $ echo '[experimental]' >> $HGRCPATH
694 694 $ echo "evolution=createmarkers,exchange" >> $HGRCPATH
695 695
696 696 #endif
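The warning above is a simple guard: when evolution is switched off while the store still contains markers, commands print a notice instead of silently exposing obsolete changesets. A hedged sketch (option names as configured above; reading the private obsstore._all list is an assumption of this sketch):

    def warnifdisabled(ui, repo):
        # the feature counts as off when 'evolution' is empty (sketch)
        nbmarkers = len(repo.obsstore._all)  # private attribute
        if not ui.configlist('experimental', 'evolution') and nbmarkers:
            ui.warn('obsolete feature not enabled but %i markers found!\n'
                    % nbmarkers)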
697 697
698 698 Test incoming/outgoing with changesets obsoleted remotely, known locally
699 699 ===============================================================================
700 700
701 701 This tests issue 3805
702 702
703 703 $ hg init repo-issue3805
704 704 $ cd repo-issue3805
705 705 $ echo "foo" > foo
706 706 $ hg ci -Am "A"
707 707 adding foo
708 708 $ hg clone . ../other-issue3805
709 709 updating to branch default
710 710 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
711 711 $ echo "bar" >> foo
712 712 $ hg ci --amend
713 713 $ cd ../other-issue3805
714 714 $ hg log -G
715 715 @ 0:193e9254ce7e (draft) [tip ] A
716 716
717 717 $ hg log -G -R ../repo-issue3805
718 718 @ 2:3816541e5485 (draft) [tip ] A
719 719
720 720 $ hg incoming
721 721 comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
722 722 searching for changes
723 723 2:3816541e5485 (draft) [tip ] A
724 724 $ hg incoming --bundle ../issue3805.hg
725 725 comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
726 726 searching for changes
727 727 2:3816541e5485 (draft) [tip ] A
728 728 $ hg outgoing
729 729 comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
730 730 searching for changes
731 731 no changes found
732 732 [1]
733 733
734 734 #if serve
735 735
736 736 $ hg serve -R ../repo-issue3805 -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
737 737 $ cat hg.pid >> $DAEMON_PIDS
738 738
739 739 $ hg incoming http://localhost:$HGPORT
740 740 comparing with http://localhost:$HGPORT/
741 741 searching for changes
742 742 1:3816541e5485 (draft) [tip ] A
743 743 $ hg outgoing http://localhost:$HGPORT
744 744 comparing with http://localhost:$HGPORT/
745 745 searching for changes
746 746 no changes found
747 747 [1]
748 748
749 749 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
750 750
751 751 #endif
752 752
753 753 This tests issue 3814
754 754
755 755 (nothing to push but locally hidden changeset)
756 756
757 757 $ cd ..
758 758 $ hg init repo-issue3814
759 759 $ cd repo-issue3805
760 760 $ hg push -r 3816541e5485 ../repo-issue3814
761 761 pushing to ../repo-issue3814
762 762 searching for changes
763 763 adding changesets
764 764 adding manifests
765 765 adding file changes
766 766 added 1 changesets with 1 changes to 1 files
767 767 $ hg out ../repo-issue3814
768 768 comparing with ../repo-issue3814
769 769 searching for changes
770 770 no changes found
771 771 [1]
772 772
773 773 Test that a local tag blocks a changeset from being hidden
774 774
775 775 $ hg tag -l visible -r 0 --hidden
776 776 $ hg log -G
777 777 @ 2:3816541e5485 (draft) [tip ] A
778 778
779 779 x 0:193e9254ce7e (draft) [visible ] A
780 780
781 781 Test that removing a local tag does not cause some commands to fail
782 782
783 783 $ hg tag -l -r tip tiptag
784 784 $ hg tags
785 785 tiptag 2:3816541e5485
786 786 tip 2:3816541e5485
787 787 visible 0:193e9254ce7e
788 788 $ hg --config extensions.strip= strip -r tip --no-backup
789 789 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
790 790 $ hg tags
791 791 visible 0:193e9254ce7e
792 792 tip 0:193e9254ce7e
793 793
794 794 Test bundle overlay onto a hidden revision
795 795
796 796 $ cd ..
797 797 $ hg init repo-bundleoverlay
798 798 $ cd repo-bundleoverlay
799 799 $ echo "A" > foo
800 800 $ hg ci -Am "A"
801 801 adding foo
802 802 $ echo "B" >> foo
803 803 $ hg ci -m "B"
804 804 $ hg up 0
805 805 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
806 806 $ echo "C" >> foo
807 807 $ hg ci -m "C"
808 808 created new head
809 809 $ hg log -G
810 810 @ 2:c186d7714947 (draft) [tip ] C
811 811 |
812 812 | o 1:44526ebb0f98 (draft) [ ] B
813 813 |/
814 814 o 0:4b34ecfb0d56 (draft) [ ] A
815 815
816 816
817 817 $ hg clone -r1 . ../other-bundleoverlay
818 818 adding changesets
819 819 adding manifests
820 820 adding file changes
821 821 added 2 changesets with 2 changes to 1 files
822 822 updating to branch default
823 823 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
824 824 $ cd ../other-bundleoverlay
825 825 $ echo "B+" >> foo
826 826 $ hg ci --amend -m "B+"
827 827 $ hg log -G --hidden
828 828 @ 3:b7d587542d40 (draft) [tip ] B+
829 829 |
830 830 | x 2:eb95e9297e18 (draft) [ ] temporary amend commit for 44526ebb0f98
831 831 | |
832 832 | x 1:44526ebb0f98 (draft) [ ] B
833 833 |/
834 834 o 0:4b34ecfb0d56 (draft) [ ] A
835 835
836 836
837 837 $ hg incoming ../repo-bundleoverlay --bundle ../bundleoverlay.hg
838 838 comparing with ../repo-bundleoverlay
839 839 searching for changes
840 840 1:44526ebb0f98 (draft) [ ] B
841 841 2:c186d7714947 (draft) [tip ] C
842 842 $ hg log -G -R ../bundleoverlay.hg
843 843 o 4:c186d7714947 (draft) [tip ] C
844 844 |
845 845 | @ 3:b7d587542d40 (draft) [ ] B+
846 846 |/
847 847 o 0:4b34ecfb0d56 (draft) [ ] A
848 848
849 849
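`hg incoming --bundle` saved the remote-only changesets into ../bundleoverlay.hg, and `hg log -R` on that file opens a bundle repository: a read-only union of the local repo and the bundle, with visibility recomputed over the union (which is why 1:44526ebb0f98 stays hidden above; its successor b7d587542d40 is local). A hedged sketch of opening such an overlay from Python; the bundlerepository(ui, path, bundlename) constructor is assumed from this era's API:

    from mercurial import ui as uimod, bundlerepo

    # open ../bundleoverlay.hg layered over the repository in '.'
    u = uimod.ui()
    overlay = bundlerepo.bundlerepository(u, '.', '../bundleoverlay.hg')
    for rev in overlay:  # iterate the visible revisions of the union
        ctx = overlay[rev]
        print('%d:%s %s' % (rev, ctx, ctx.description()))
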
850 850 #if serve
851 851
852 852 Test issue 4506
853 853
854 854 $ cd ..
855 855 $ hg init repo-issue4506
856 856 $ cd repo-issue4506
857 857 $ echo "0" > foo
858 858 $ hg add foo
859 859 $ hg ci -m "content-0"
860 860
861 861 $ hg up null
862 862 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
863 863 $ echo "1" > bar
864 864 $ hg add bar
865 865 $ hg ci -m "content-1"
866 866 created new head
867 867 $ hg up 0
868 868 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
869 869 $ hg graft 1
870 870 grafting 1:1c9eddb02162 "content-1" (tip)
871 871
872 872 $ hg debugobsolete `hg log -r1 -T'{node}'` `hg log -r2 -T'{node}'`
873 873
874 874 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
875 875 $ cat hg.pid >> $DAEMON_PIDS
876 876
877 877 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/1'
878 878 404 Not Found
879 879 [1]
880 880 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'file/tip/bar'
881 881 200 Script output follows
882 882 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'annotate/tip/bar'
883 883 200 Script output follows
884 884
885 885 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
886 886
887 887 #endif
888 888