##// END OF EJS Templates
push: catch and process PushkeyFailed error...
Pierre-Yves David -
r25485:8182163a default
parent child Browse files
Show More
@@ -1,1547 +1,1556
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import time
9 9 from i18n import _
10 10 from node import hex, nullid
11 11 import errno, urllib
12 12 import util, scmutil, changegroup, base85, error, store
13 13 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
14 14 import lock as lockmod
15 15 import tags
16 16
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the header of ``fh`` and return the matching unbundler.

    ``fname`` is used for error reporting only and defaults to "stream"
    when empty; when ``vfs`` is given a non-empty ``fname`` is joined to it.
    Handles three on-the-wire formats: headerless raw changegroup data
    (only accepted for nameless streams), HG10 bundles and HG2* bundles.
    Raises util.Abort for anything else.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # Headerless stream starting with a NUL byte: assume uncompressed
        # changegroup data and splice a synthetic HG10UN header back in so
        # the generic dispatch below still works.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # HG10 carries the 2-byte compression id right after the magic
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
42 42
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    # nothing to transfer: do not even create an empty part
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    # encode with the newest format both sides understand
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
57 57
58 58 def _canusebundle2(op):
59 59 """return true if a pull/push can use bundle2
60 60
61 61 Feel free to nuke this function when we drop the experimental option"""
62 62 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
63 63 and op.remote.capable('bundle2'))
64 64
65 65
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            # push succeeded: every missing head is now common
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
174 177
175 178
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # local-to-local push: refuse early if the destination repo cannot
        # represent our requirements
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # transaction used to apply any reply bundle from the server
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must lock the remote repository ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            # the steps below are no-ops for anything bundle2 already did
            # (they check pushop.stepsdone before acting)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
256 259
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def register(func):
        # refuse to silently clobber an already registered step
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
280 283
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    # run every registered step, in registration order
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
286 289
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    # first figure out what the remote has that we don't, and its heads
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    # then compute what we must send, reusing the incoming computation
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
299 302
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    # pheads: heads known public remotely; droots: remote draft roots
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # non-publishing server: only locally-public changesets move phase
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # phases to push when changesets go through / when they do not
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
348 351
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """discover the obsolescence markers relevant to the push

    Only runs when marker exchange is enabled, the local repo has markers
    and the remote advertises the 'obsolete' pushkey namespace.
    """
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, that can be quite expensive on big repo.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
359 362
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover which bookmarks must be updated, added or deleted remotely

    Fills ``pushop.outbookmarks`` with (name, old-remote-id, new-id) tuples
    and sets ``pushop.bkresult`` to 2 if an explicitly requested bookmark
    exists on neither side.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # when pushing a subset, only move bookmarks within that subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks the user asked for by name; entries are removed as they are
    # matched below, leftovers are reported as unknown
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
410 413
def _pushcheckoutgoing(pushop):
    """sanity-check the outgoing changesets before pushing them

    Returns False when there is nothing to push. Unless --force is given,
    aborts if an outgoing head is obsolete/troubled and runs the standard
    new-head checks against the remote.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
445 448
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        # each step name may only be registered once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            # explicit position requested (e.g. to run before 'changeset')
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
472 475
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # server will abort if its heads changed since discovery
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # pick the newest changegroup version both sides understand
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
513 516
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if not 'pushkey' in b2caps:
        # remote cannot process pushkey parts; fall back to the old protocol
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    # one advisory pushkey part per head that must become public
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey', mandatory=False)
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        # warn about any head the server ignored or failed to update
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
545 548
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsmarkers part to the outgoing bundle2 when possible"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    # skip entirely when no common obsmarker format exists
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
557 560
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        # remote cannot process pushkey parts; fall back to the old protocol
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    # one advisory pushkey part per bookmark to update/export/delete
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey', mandatory=False)
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        ui = pushop.ui
        # report per-bookmark success/failure from the server reply
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
600 603
601 604
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    Assembles one bundle2 from all registered part generators (changegroup,
    phases, obsmarkers, bookmarks, ...), sends it, then processes the server
    reply bundle and dispatches it to the generators' reply handlers."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError, exc:
            raise util.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError, exc:
            raise util.Abort('missing support for %s' % exc)
    except error.PushkeyFailed, exc:
        # a mandatory pushkey part failed server side; hand the failure to
        # the callback registered for that part (if any), otherwise re-raise
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
638 647
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
687 696
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            # heads public remotely become public locally; the rest of the
            # common set only advances to draft
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
743 752
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        # we hold the local lock and a transaction: move phases for real
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when
    # applicable.
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    if actualmoves:
        phasestr = phases.phasenames[phase]
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
760 769
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        rslts = []
        # markers are split over multiple pushkey entries when too large
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
779 788
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        # changeset push failed outright, or bundle2 already handled it
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # classify the change for the status/warning message
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
801 810
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """return the pull transaction (deprecated; talk to trmanager directly)"""
        return self.trmanager.transaction()
859 868
860 869 class transactionmanager(object):
861 870 """An object to manage the life cycle of a transaction
862 871
863 872 It creates the transaction on demand and calls the appropriate hooks when
864 873 closing the transaction."""
865 874 def __init__(self, repo, source, url):
866 875 self.repo = repo
867 876 self.source = source
868 877 self.url = url
869 878 self._tr = None
870 879
871 880 def transaction(self):
872 881 """Return an open transaction object, constructing if necessary"""
873 882 if not self._tr:
874 883 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
875 884 self._tr = self.repo.transaction(trname)
876 885 self._tr.hookargs['source'] = self.source
877 886 self._tr.hookargs['url'] = self.url
878 887 return self._tr
879 888
880 889 def close(self):
881 890 """close transaction if created"""
882 891 if self._tr is not None:
883 892 self._tr.close()
884 893
885 894 def release(self):
886 895 """release transaction if created"""
887 896 if self._tr is not None:
888 897 self._tr.release()
889 898
890 899 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None):
891 900 if opargs is None:
892 901 opargs = {}
893 902 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
894 903 **opargs)
895 904 if pullop.remote.local():
896 905 missing = set(pullop.remote.requirements) - pullop.repo.supported
897 906 if missing:
898 907 msg = _("required features are not"
899 908 " supported in the destination:"
900 909 " %s") % (', '.join(sorted(missing)))
901 910 raise util.Abort(msg)
902 911
903 912 lock = pullop.repo.lock()
904 913 try:
905 914 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
906 915 _pulldiscovery(pullop)
907 916 if _canusebundle2(pullop):
908 917 _pullbundle2(pullop)
909 918 _pullchangeset(pullop)
910 919 _pullphase(pullop)
911 920 _pullbookmarks(pullop)
912 921 _pullobsolete(pullop)
913 922 pullop.trmanager.close()
914 923 finally:
915 924 pullop.trmanager.release()
916 925 lock.release()
917 926
918 927 return pullop
919 928
920 929 # list of steps to perform discovery before pull
921 930 pulldiscoveryorder = []
922 931
923 932 # Mapping between step name and function
924 933 #
925 934 # This exists to help extensions wrap steps if necessary
926 935 pulldiscoverymapping = {}
927 936
928 937 def pulldiscovery(stepname):
929 938 """decorator for function performing discovery before pull
930 939
931 940 The function is added to the step -> function mapping and appended to the
932 941 list of steps. Beware that decorated function will be added in order (this
933 942 may matter).
934 943
935 944 You can only use this decorator for a new step, if you want to wrap a step
936 945 from an extension, change the pulldiscovery dictionary directly."""
937 946 def dec(func):
938 947 assert stepname not in pulldiscoverymapping
939 948 pulldiscoverymapping[stepname] = func
940 949 pulldiscoveryorder.append(stepname)
941 950 return func
942 951 return dec
943 952
944 953 def _pulldiscovery(pullop):
945 954 """Run all discovery steps"""
946 955 for stepname in pulldiscoveryorder:
947 956 step = pulldiscoverymapping[stepname]
948 957 step(pullop)
949 958
950 959 @pulldiscovery('b1:bookmarks')
951 960 def _pullbookmarkbundle1(pullop):
952 961 """fetch bookmark data in bundle1 case
953 962
954 963 If not using bundle2, we have to fetch bookmarks before changeset
955 964 discovery to reduce the chance and impact of race conditions."""
956 965 if pullop.remotebookmarks is not None:
957 966 return
958 967 if (_canusebundle2(pullop)
959 968 and 'listkeys' in bundle2.bundle2caps(pullop.remote)):
960 969 # all known bundle2 servers now support listkeys, but lets be nice with
961 970 # new implementation.
962 971 return
963 972 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
964 973
965 974
966 975 @pulldiscovery('changegroup')
967 976 def _pulldiscoverychangegroup(pullop):
968 977 """discovery phase for the pull
969 978
970 979 Current handle changeset discovery only, will change handle all discovery
971 980 at some point."""
972 981 tmp = discovery.findcommonincoming(pullop.repo,
973 982 pullop.remote,
974 983 heads=pullop.heads,
975 984 force=pullop.force)
976 985 common, fetch, rheads = tmp
977 986 nm = pullop.repo.unfiltered().changelog.nodemap
978 987 if fetch and rheads:
979 988 # If a remote heads in filtered locally, lets drop it from the unknown
980 989 # remote heads and put in back in common.
981 990 #
982 991 # This is a hackish solution to catch most of "common but locally
983 992 # hidden situation". We do not performs discovery on unfiltered
984 993 # repository because it end up doing a pathological amount of round
985 994 # trip for w huge amount of changeset we do not care about.
986 995 #
987 996 # If a set of such "common but filtered" changeset exist on the server
988 997 # but are not including a remote heads, we'll not be able to detect it,
989 998 scommon = set(common)
990 999 filteredrheads = []
991 1000 for n in rheads:
992 1001 if n in nm:
993 1002 if n not in scommon:
994 1003 common.append(n)
995 1004 else:
996 1005 filteredrheads.append(n)
997 1006 if not filteredrheads:
998 1007 fetch = []
999 1008 rheads = filteredrheads
1000 1009 pullop.common = common
1001 1010 pullop.fetch = fetch
1002 1011 pullop.rheads = rheads
1003 1012
1004 1013 def _pullbundle2(pullop):
1005 1014 """pull data using bundle2
1006 1015
1007 1016 For now, the only supported data are changegroup."""
1008 1017 remotecaps = bundle2.bundle2caps(pullop.remote)
1009 1018 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1010 1019 # pulling changegroup
1011 1020 pullop.stepsdone.add('changegroup')
1012 1021
1013 1022 kwargs['common'] = pullop.common
1014 1023 kwargs['heads'] = pullop.heads or pullop.rheads
1015 1024 kwargs['cg'] = pullop.fetch
1016 1025 if 'listkeys' in remotecaps:
1017 1026 kwargs['listkeys'] = ['phase']
1018 1027 if pullop.remotebookmarks is None:
1019 1028 # make sure to always includes bookmark data when migrating
1020 1029 # `hg incoming --bundle` to using this function.
1021 1030 kwargs['listkeys'].append('bookmarks')
1022 1031 if not pullop.fetch:
1023 1032 pullop.repo.ui.status(_("no changes found\n"))
1024 1033 pullop.cgresult = 0
1025 1034 else:
1026 1035 if pullop.heads is None and list(pullop.common) == [nullid]:
1027 1036 pullop.repo.ui.status(_("requesting all changes\n"))
1028 1037 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1029 1038 remoteversions = bundle2.obsmarkersversion(remotecaps)
1030 1039 if obsolete.commonversion(remoteversions) is not None:
1031 1040 kwargs['obsmarkers'] = True
1032 1041 pullop.stepsdone.add('obsmarkers')
1033 1042 _pullbundle2extraprepare(pullop, kwargs)
1034 1043 bundle = pullop.remote.getbundle('pull', **kwargs)
1035 1044 try:
1036 1045 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1037 1046 except error.BundleValueError, exc:
1038 1047 raise util.Abort('missing support for %s' % exc)
1039 1048
1040 1049 if pullop.fetch:
1041 1050 results = [cg['return'] for cg in op.records['changegroup']]
1042 1051 pullop.cgresult = changegroup.combineresults(results)
1043 1052
1044 1053 # processing phases change
1045 1054 for namespace, value in op.records['listkeys']:
1046 1055 if namespace == 'phases':
1047 1056 _pullapplyphases(pullop, value)
1048 1057
1049 1058 # processing bookmark update
1050 1059 for namespace, value in op.records['listkeys']:
1051 1060 if namespace == 'bookmarks':
1052 1061 pullop.remotebookmarks = value
1053 1062
1054 1063 # bookmark data were either already there or pulled in the bundle
1055 1064 if pullop.remotebookmarks is not None:
1056 1065 _pullbookmarks(pullop)
1057 1066
1058 1067 def _pullbundle2extraprepare(pullop, kwargs):
1059 1068 """hook function so that extensions can extend the getbundle call"""
1060 1069 pass
1061 1070
1062 1071 def _pullchangeset(pullop):
1063 1072 """pull changeset from unbundle into the local repo"""
1064 1073 # We delay the open of the transaction as late as possible so we
1065 1074 # don't open transaction for nothing or you break future useful
1066 1075 # rollback call
1067 1076 if 'changegroup' in pullop.stepsdone:
1068 1077 return
1069 1078 pullop.stepsdone.add('changegroup')
1070 1079 if not pullop.fetch:
1071 1080 pullop.repo.ui.status(_("no changes found\n"))
1072 1081 pullop.cgresult = 0
1073 1082 return
1074 1083 pullop.gettransaction()
1075 1084 if pullop.heads is None and list(pullop.common) == [nullid]:
1076 1085 pullop.repo.ui.status(_("requesting all changes\n"))
1077 1086 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1078 1087 # issue1320, avoid a race if remote changed after discovery
1079 1088 pullop.heads = pullop.rheads
1080 1089
1081 1090 if pullop.remote.capable('getbundle'):
1082 1091 # TODO: get bundlecaps from remote
1083 1092 cg = pullop.remote.getbundle('pull', common=pullop.common,
1084 1093 heads=pullop.heads or pullop.rheads)
1085 1094 elif pullop.heads is None:
1086 1095 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1087 1096 elif not pullop.remote.capable('changegroupsubset'):
1088 1097 raise util.Abort(_("partial pull cannot be done because "
1089 1098 "other repository doesn't support "
1090 1099 "changegroupsubset."))
1091 1100 else:
1092 1101 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1093 1102 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1094 1103 pullop.remote.url())
1095 1104
1096 1105 def _pullphase(pullop):
1097 1106 # Get remote phases data from remote
1098 1107 if 'phases' in pullop.stepsdone:
1099 1108 return
1100 1109 remotephases = pullop.remote.listkeys('phases')
1101 1110 _pullapplyphases(pullop, remotephases)
1102 1111
1103 1112 def _pullapplyphases(pullop, remotephases):
1104 1113 """apply phase movement from observed remote state"""
1105 1114 if 'phases' in pullop.stepsdone:
1106 1115 return
1107 1116 pullop.stepsdone.add('phases')
1108 1117 publishing = bool(remotephases.get('publishing', False))
1109 1118 if remotephases and not publishing:
1110 1119 # remote is new and unpublishing
1111 1120 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1112 1121 pullop.pulledsubset,
1113 1122 remotephases)
1114 1123 dheads = pullop.pulledsubset
1115 1124 else:
1116 1125 # Remote is old or publishing all common changesets
1117 1126 # should be seen as public
1118 1127 pheads = pullop.pulledsubset
1119 1128 dheads = []
1120 1129 unfi = pullop.repo.unfiltered()
1121 1130 phase = unfi._phasecache.phase
1122 1131 rev = unfi.changelog.nodemap.get
1123 1132 public = phases.public
1124 1133 draft = phases.draft
1125 1134
1126 1135 # exclude changesets already public locally and update the others
1127 1136 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1128 1137 if pheads:
1129 1138 tr = pullop.gettransaction()
1130 1139 phases.advanceboundary(pullop.repo, tr, public, pheads)
1131 1140
1132 1141 # exclude changesets already draft locally and update the others
1133 1142 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1134 1143 if dheads:
1135 1144 tr = pullop.gettransaction()
1136 1145 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1137 1146
1138 1147 def _pullbookmarks(pullop):
1139 1148 """process the remote bookmark information to update the local one"""
1140 1149 if 'bookmarks' in pullop.stepsdone:
1141 1150 return
1142 1151 pullop.stepsdone.add('bookmarks')
1143 1152 repo = pullop.repo
1144 1153 remotebookmarks = pullop.remotebookmarks
1145 1154 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1146 1155 pullop.remote.url(),
1147 1156 pullop.gettransaction,
1148 1157 explicit=pullop.explicitbookmarks)
1149 1158
1150 1159 def _pullobsolete(pullop):
1151 1160 """utility function to pull obsolete markers from a remote
1152 1161
1153 1162 The `gettransaction` is function that return the pull transaction, creating
1154 1163 one if necessary. We return the transaction to inform the calling code that
1155 1164 a new transaction have been created (when applicable).
1156 1165
1157 1166 Exists mostly to allow overriding for experimentation purpose"""
1158 1167 if 'obsmarkers' in pullop.stepsdone:
1159 1168 return
1160 1169 pullop.stepsdone.add('obsmarkers')
1161 1170 tr = None
1162 1171 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1163 1172 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1164 1173 remoteobs = pullop.remote.listkeys('obsolete')
1165 1174 if 'dump0' in remoteobs:
1166 1175 tr = pullop.gettransaction()
1167 1176 for key in sorted(remoteobs, reverse=True):
1168 1177 if key.startswith('dump'):
1169 1178 data = base85.b85decode(remoteobs[key])
1170 1179 pullop.repo.obsstore.mergemarkers(tr, data)
1171 1180 pullop.repo.invalidatevolatilesets()
1172 1181 return tr
1173 1182
1174 1183 def caps20to10(repo):
1175 1184 """return a set with appropriate options to use bundle20 during getbundle"""
1176 1185 caps = set(['HG20'])
1177 1186 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1178 1187 caps.add('bundle2=' + urllib.quote(capsblob))
1179 1188 return caps
1180 1189
1181 1190 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1182 1191 getbundle2partsorder = []
1183 1192
1184 1193 # Mapping between step name and function
1185 1194 #
1186 1195 # This exists to help extensions wrap steps if necessary
1187 1196 getbundle2partsmapping = {}
1188 1197
1189 1198 def getbundle2partsgenerator(stepname, idx=None):
1190 1199 """decorator for function generating bundle2 part for getbundle
1191 1200
1192 1201 The function is added to the step -> function mapping and appended to the
1193 1202 list of steps. Beware that decorated functions will be added in order
1194 1203 (this may matter).
1195 1204
1196 1205 You can only use this decorator for new steps, if you want to wrap a step
1197 1206 from an extension, attack the getbundle2partsmapping dictionary directly."""
1198 1207 def dec(func):
1199 1208 assert stepname not in getbundle2partsmapping
1200 1209 getbundle2partsmapping[stepname] = func
1201 1210 if idx is None:
1202 1211 getbundle2partsorder.append(stepname)
1203 1212 else:
1204 1213 getbundle2partsorder.insert(idx, stepname)
1205 1214 return func
1206 1215 return dec
1207 1216
1208 1217 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1209 1218 **kwargs):
1210 1219 """return a full bundle (with potentially multiple kind of parts)
1211 1220
1212 1221 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1213 1222 passed. For now, the bundle can contain only changegroup, but this will
1214 1223 changes when more part type will be available for bundle2.
1215 1224
1216 1225 This is different from changegroup.getchangegroup that only returns an HG10
1217 1226 changegroup bundle. They may eventually get reunited in the future when we
1218 1227 have a clearer idea of the API we what to query different data.
1219 1228
1220 1229 The implementation is at a very early stage and will get massive rework
1221 1230 when the API of bundle is refined.
1222 1231 """
1223 1232 # bundle10 case
1224 1233 usebundle2 = False
1225 1234 if bundlecaps is not None:
1226 1235 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1227 1236 if not usebundle2:
1228 1237 if bundlecaps and not kwargs.get('cg', True):
1229 1238 raise ValueError(_('request for bundle10 must include changegroup'))
1230 1239
1231 1240 if kwargs:
1232 1241 raise ValueError(_('unsupported getbundle arguments: %s')
1233 1242 % ', '.join(sorted(kwargs.keys())))
1234 1243 return changegroup.getchangegroup(repo, source, heads=heads,
1235 1244 common=common, bundlecaps=bundlecaps)
1236 1245
1237 1246 # bundle20 case
1238 1247 b2caps = {}
1239 1248 for bcaps in bundlecaps:
1240 1249 if bcaps.startswith('bundle2='):
1241 1250 blob = urllib.unquote(bcaps[len('bundle2='):])
1242 1251 b2caps.update(bundle2.decodecaps(blob))
1243 1252 bundler = bundle2.bundle20(repo.ui, b2caps)
1244 1253
1245 1254 kwargs['heads'] = heads
1246 1255 kwargs['common'] = common
1247 1256
1248 1257 for name in getbundle2partsorder:
1249 1258 func = getbundle2partsmapping[name]
1250 1259 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1251 1260 **kwargs)
1252 1261
1253 1262 return util.chunkbuffer(bundler.getchunks())
1254 1263
1255 1264 @getbundle2partsgenerator('changegroup')
1256 1265 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1257 1266 b2caps=None, heads=None, common=None, **kwargs):
1258 1267 """add a changegroup part to the requested bundle"""
1259 1268 cg = None
1260 1269 if kwargs.get('cg', True):
1261 1270 # build changegroup bundle here.
1262 1271 version = None
1263 1272 cgversions = b2caps.get('changegroup')
1264 1273 if not cgversions: # 3.1 and 3.2 ship with an empty value
1265 1274 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1266 1275 common=common,
1267 1276 bundlecaps=bundlecaps)
1268 1277 else:
1269 1278 cgversions = [v for v in cgversions if v in changegroup.packermap]
1270 1279 if not cgversions:
1271 1280 raise ValueError(_('no common changegroup version'))
1272 1281 version = max(cgversions)
1273 1282 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1274 1283 common=common,
1275 1284 bundlecaps=bundlecaps,
1276 1285 version=version)
1277 1286
1278 1287 if cg:
1279 1288 part = bundler.newpart('changegroup', data=cg)
1280 1289 if version is not None:
1281 1290 part.addparam('version', version)
1282 1291
1283 1292 @getbundle2partsgenerator('listkeys')
1284 1293 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1285 1294 b2caps=None, **kwargs):
1286 1295 """add parts containing listkeys namespaces to the requested bundle"""
1287 1296 listkeys = kwargs.get('listkeys', ())
1288 1297 for namespace in listkeys:
1289 1298 part = bundler.newpart('listkeys')
1290 1299 part.addparam('namespace', namespace)
1291 1300 keys = repo.listkeys(namespace).items()
1292 1301 part.data = pushkey.encodekeys(keys)
1293 1302
1294 1303 @getbundle2partsgenerator('obsmarkers')
1295 1304 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1296 1305 b2caps=None, heads=None, **kwargs):
1297 1306 """add an obsolescence markers part to the requested bundle"""
1298 1307 if kwargs.get('obsmarkers', False):
1299 1308 if heads is None:
1300 1309 heads = repo.heads()
1301 1310 subset = [c.node() for c in repo.set('::%ln', heads)]
1302 1311 markers = repo.obsstore.relevantmarkers(subset)
1303 1312 markers = sorted(markers)
1304 1313 buildobsmarkerspart(bundler, markers)
1305 1314
1306 1315 @getbundle2partsgenerator('hgtagsfnodes')
1307 1316 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1308 1317 b2caps=None, heads=None, common=None,
1309 1318 **kwargs):
1310 1319 """Transfer the .hgtags filenodes mapping.
1311 1320
1312 1321 Only values for heads in this bundle will be transferred.
1313 1322
1314 1323 The part data consists of pairs of 20 byte changeset node and .hgtags
1315 1324 filenodes raw values.
1316 1325 """
1317 1326 # Don't send unless:
1318 1327 # - changeset are being exchanged,
1319 1328 # - the client supports it.
1320 1329 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1321 1330 return
1322 1331
1323 1332 outgoing = changegroup.computeoutgoing(repo, heads, common)
1324 1333
1325 1334 if not outgoing.missingheads:
1326 1335 return
1327 1336
1328 1337 cache = tags.hgtagsfnodescache(repo.unfiltered())
1329 1338 chunks = []
1330 1339
1331 1340 # .hgtags fnodes are only relevant for head changesets. While we could
1332 1341 # transfer values for all known nodes, there will likely be little to
1333 1342 # no benefit.
1334 1343 #
1335 1344 # We don't bother using a generator to produce output data because
1336 1345 # a) we only have 40 bytes per head and even esoteric numbers of heads
1337 1346 # consume little memory (1M heads is 40MB) b) we don't want to send the
1338 1347 # part if we don't have entries and knowing if we have entries requires
1339 1348 # cache lookups.
1340 1349 for node in outgoing.missingheads:
1341 1350 # Don't compute missing, as this may slow down serving.
1342 1351 fnode = cache.getfnode(node, computemissing=False)
1343 1352 if fnode is not None:
1344 1353 chunks.extend([node, fnode])
1345 1354
1346 1355 if chunks:
1347 1356 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1348 1357
1349 1358 def check_heads(repo, their_heads, context):
1350 1359 """check if the heads of a repo have been modified
1351 1360
1352 1361 Used by peer for unbundling.
1353 1362 """
1354 1363 heads = repo.heads()
1355 1364 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1356 1365 if not (their_heads == ['force'] or their_heads == heads or
1357 1366 their_heads == ['hashed', heads_hash]):
1358 1367 # someone else committed/pushed/unbundled while we
1359 1368 # were transferring data
1360 1369 raise error.PushRaced('repository changed while %s - '
1361 1370 'please try again' % context)
1362 1371
1363 1372 def unbundle(repo, cg, heads, source, url):
1364 1373 """Apply a bundle to a repo.
1365 1374
1366 1375 this function makes sure the repo is locked during the application and have
1367 1376 mechanism to check that no push race occurred between the creation of the
1368 1377 bundle and its application.
1369 1378
1370 1379 If the push was raced as PushRaced exception is raised."""
1371 1380 r = 0
1372 1381 # need a transaction when processing a bundle2 stream
1373 1382 wlock = lock = tr = None
1374 1383 recordout = None
1375 1384 # quick fix for output mismatch with bundle2 in 3.4
1376 1385 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1377 1386 False)
1378 1387 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1379 1388 captureoutput = True
1380 1389 try:
1381 1390 check_heads(repo, heads, 'uploading changes')
1382 1391 # push can proceed
1383 1392 if util.safehasattr(cg, 'params'):
1384 1393 r = None
1385 1394 try:
1386 1395 wlock = repo.wlock()
1387 1396 lock = repo.lock()
1388 1397 tr = repo.transaction(source)
1389 1398 tr.hookargs['source'] = source
1390 1399 tr.hookargs['url'] = url
1391 1400 tr.hookargs['bundle2'] = '1'
1392 1401 op = bundle2.bundleoperation(repo, lambda: tr,
1393 1402 captureoutput=captureoutput)
1394 1403 try:
1395 1404 r = bundle2.processbundle(repo, cg, op=op)
1396 1405 finally:
1397 1406 r = op.reply
1398 1407 if captureoutput and r is not None:
1399 1408 repo.ui.pushbuffer(error=True, subproc=True)
1400 1409 def recordout(output):
1401 1410 r.newpart('output', data=output, mandatory=False)
1402 1411 tr.close()
1403 1412 except BaseException, exc:
1404 1413 exc.duringunbundle2 = True
1405 1414 if captureoutput and r is not None:
1406 1415 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1407 1416 def recordout(output):
1408 1417 part = bundle2.bundlepart('output', data=output,
1409 1418 mandatory=False)
1410 1419 parts.append(part)
1411 1420 raise
1412 1421 else:
1413 1422 lock = repo.lock()
1414 1423 r = changegroup.addchangegroup(repo, cg, source, url)
1415 1424 finally:
1416 1425 lockmod.release(tr, lock, wlock)
1417 1426 if recordout is not None:
1418 1427 recordout(repo.ui.popbuffer())
1419 1428 return r
1420 1429
1421 1430 # This is it's own function so extensions can override it.
1422 1431 def _walkstreamfiles(repo):
1423 1432 return repo.store.walk()
1424 1433
1425 1434 def generatestreamclone(repo):
1426 1435 """Emit content for a streaming clone.
1427 1436
1428 1437 This is a generator of raw chunks that constitute a streaming clone.
1429 1438
1430 1439 The stream begins with a line of 2 space-delimited integers containing the
1431 1440 number of entries and total bytes size.
1432 1441
1433 1442 Next, are N entries for each file being transferred. Each file entry starts
1434 1443 as a line with the file name and integer size delimited by a null byte.
1435 1444 The raw file data follows. Following the raw file data is the next file
1436 1445 entry, or EOF.
1437 1446
1438 1447 When used on the wire protocol, an additional line indicating protocol
1439 1448 success will be prepended to the stream. This function is not responsible
1440 1449 for adding it.
1441 1450
1442 1451 This function will obtain a repository lock to ensure a consistent view of
1443 1452 the store is captured. It therefore may raise LockError.
1444 1453 """
1445 1454 entries = []
1446 1455 total_bytes = 0
1447 1456 # Get consistent snapshot of repo, lock during scan.
1448 1457 lock = repo.lock()
1449 1458 try:
1450 1459 repo.ui.debug('scanning\n')
1451 1460 for name, ename, size in _walkstreamfiles(repo):
1452 1461 if size:
1453 1462 entries.append((name, size))
1454 1463 total_bytes += size
1455 1464 finally:
1456 1465 lock.release()
1457 1466
1458 1467 repo.ui.debug('%d files, %d bytes to transfer\n' %
1459 1468 (len(entries), total_bytes))
1460 1469 yield '%d %d\n' % (len(entries), total_bytes)
1461 1470
1462 1471 sopener = repo.svfs
1463 1472 oldaudit = sopener.mustaudit
1464 1473 debugflag = repo.ui.debugflag
1465 1474 sopener.mustaudit = False
1466 1475
1467 1476 try:
1468 1477 for name, size in entries:
1469 1478 if debugflag:
1470 1479 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
1471 1480 # partially encode name over the wire for backwards compat
1472 1481 yield '%s\0%d\n' % (store.encodedir(name), size)
1473 1482 if size <= 65536:
1474 1483 fp = sopener(name)
1475 1484 try:
1476 1485 data = fp.read(size)
1477 1486 finally:
1478 1487 fp.close()
1479 1488 yield data
1480 1489 else:
1481 1490 for chunk in util.filechunkiter(sopener(name), limit=size):
1482 1491 yield chunk
1483 1492 finally:
1484 1493 sopener.mustaudit = oldaudit
1485 1494
1486 1495 def consumestreamclone(repo, fp):
1487 1496 """Apply the contents from a streaming clone file.
1488 1497
1489 1498 This takes the output from "streamout" and applies it to the specified
1490 1499 repository.
1491 1500
1492 1501 Like "streamout," the status line added by the wire protocol is not handled
1493 1502 by this function.
1494 1503 """
1495 1504 lock = repo.lock()
1496 1505 try:
1497 1506 repo.ui.status(_('streaming all changes\n'))
1498 1507 l = fp.readline()
1499 1508 try:
1500 1509 total_files, total_bytes = map(int, l.split(' ', 1))
1501 1510 except (ValueError, TypeError):
1502 1511 raise error.ResponseError(
1503 1512 _('unexpected response from remote server:'), l)
1504 1513 repo.ui.status(_('%d files to transfer, %s of data\n') %
1505 1514 (total_files, util.bytecount(total_bytes)))
1506 1515 handled_bytes = 0
1507 1516 repo.ui.progress(_('clone'), 0, total=total_bytes)
1508 1517 start = time.time()
1509 1518
1510 1519 tr = repo.transaction(_('clone'))
1511 1520 try:
1512 1521 for i in xrange(total_files):
1513 1522 # XXX doesn't support '\n' or '\r' in filenames
1514 1523 l = fp.readline()
1515 1524 try:
1516 1525 name, size = l.split('\0', 1)
1517 1526 size = int(size)
1518 1527 except (ValueError, TypeError):
1519 1528 raise error.ResponseError(
1520 1529 _('unexpected response from remote server:'), l)
1521 1530 if repo.ui.debugflag:
1522 1531 repo.ui.debug('adding %s (%s)\n' %
1523 1532 (name, util.bytecount(size)))
1524 1533 # for backwards compat, name was partially encoded
1525 1534 ofp = repo.svfs(store.decodedir(name), 'w')
1526 1535 for chunk in util.filechunkiter(fp, limit=size):
1527 1536 handled_bytes += len(chunk)
1528 1537 repo.ui.progress(_('clone'), handled_bytes,
1529 1538 total=total_bytes)
1530 1539 ofp.write(chunk)
1531 1540 ofp.close()
1532 1541 tr.close()
1533 1542 finally:
1534 1543 tr.release()
1535 1544
1536 1545 # Writing straight to files circumvented the inmemory caches
1537 1546 repo.invalidate()
1538 1547
1539 1548 elapsed = time.time() - start
1540 1549 if elapsed <= 0:
1541 1550 elapsed = 0.001
1542 1551 repo.ui.progress(_('clone'), None)
1543 1552 repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1544 1553 (util.bytecount(total_bytes), elapsed,
1545 1554 util.bytecount(total_bytes / elapsed)))
1546 1555 finally:
1547 1556 lock.release()
@@ -1,861 +1,869
1 1 Test exchange of common information using bundle2
2 2
3 3
4 4 $ getmainid() {
5 5 > hg -R main log --template '{node}\n' --rev "$1"
6 6 > }
7 7
8 8 enable obsolescence
9 9
10 10 $ cat > $TESTTMP/bundle2-pushkey-hook.sh << EOF
11 11 > echo pushkey: lock state after \"\$HG_NAMESPACE\"
12 12 > hg debuglock
13 13 > EOF
14 14
15 15 $ cat >> $HGRCPATH << EOF
16 16 > [experimental]
17 17 > evolution=createmarkers,exchange
18 18 > bundle2-exp=True
19 19 > bundle2-output-capture=True
20 20 > [ui]
21 21 > ssh=dummyssh
22 22 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
23 23 > [web]
24 24 > push_ssl = false
25 25 > allow_push = *
26 26 > [phases]
27 27 > publish=False
28 28 > [hooks]
29 29 > pretxnclose.tip = hg log -r tip -T "pre-close-tip:{node|short} {phase} {bookmarks}\n"
30 30 > txnclose.tip = hg log -r tip -T "postclose-tip:{node|short} {phase} {bookmarks}\n"
31 31 > txnclose.env = sh -c "HG_LOCAL= printenv.py txnclose"
32 32 > pushkey= sh "$TESTTMP/bundle2-pushkey-hook.sh"
33 33 > EOF
34 34
35 35 The extension requires a repo (currently unused)
36 36
37 37 $ hg init main
38 38 $ cd main
39 39 $ touch a
40 40 $ hg add a
41 41 $ hg commit -m 'a'
42 42 pre-close-tip:3903775176ed draft
43 43 postclose-tip:3903775176ed draft
44 44 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
45 45
46 46 $ hg unbundle $TESTDIR/bundles/rebase.hg
47 47 adding changesets
48 48 adding manifests
49 49 adding file changes
50 50 added 8 changesets with 7 changes to 7 files (+3 heads)
51 51 pre-close-tip:02de42196ebe draft
52 52 postclose-tip:02de42196ebe draft
53 53 txnclose hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_PHASES_MOVED=1 HG_SOURCE=unbundle HG_TXNID=TXN:* HG_TXNNAME=unbundle (glob)
54 54 bundle:*/tests/bundles/rebase.hg HG_URL=bundle:*/tests/bundles/rebase.hg (glob)
55 55 (run 'hg heads' to see heads, 'hg merge' to merge)
56 56
57 57 $ cd ..
58 58
59 59 Real world exchange
60 60 =====================
61 61
62 62 Add more obsolescence information
63 63
64 64 $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc`
65 65 pre-close-tip:02de42196ebe draft
66 66 postclose-tip:02de42196ebe draft
67 67 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
68 68 $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c`
69 69 pre-close-tip:02de42196ebe draft
70 70 postclose-tip:02de42196ebe draft
71 71 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
72 72
73 73 clone --pull
74 74
75 75 $ hg -R main phase --public cd010b8cd998
76 76 pre-close-tip:02de42196ebe draft
77 77 postclose-tip:02de42196ebe draft
78 78 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
79 79 $ hg clone main other --pull --rev 9520eea781bc
80 80 adding changesets
81 81 adding manifests
82 82 adding file changes
83 83 added 2 changesets with 2 changes to 2 files
84 84 1 new obsolescence markers
85 85 pre-close-tip:9520eea781bc draft
86 86 postclose-tip:9520eea781bc draft
87 87 txnclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
88 88 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
89 89 updating to branch default
90 90 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
91 91 $ hg -R other log -G
92 92 @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
93 93 |
94 94 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
95 95
96 96 $ hg -R other debugobsolete
97 97 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
98 98
99 99 pull
100 100
101 101 $ hg -R main phase --public 9520eea781bc
102 102 pre-close-tip:02de42196ebe draft
103 103 postclose-tip:02de42196ebe draft
104 104 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
105 105 $ hg -R other pull -r 24b6387c8c8c
106 106 pulling from $TESTTMP/main (glob)
107 107 searching for changes
108 108 adding changesets
109 109 adding manifests
110 110 adding file changes
111 111 added 1 changesets with 1 changes to 1 files (+1 heads)
112 112 1 new obsolescence markers
113 113 pre-close-tip:24b6387c8c8c draft
114 114 postclose-tip:24b6387c8c8c draft
115 115 txnclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
116 116 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
117 117 (run 'hg heads' to see heads, 'hg merge' to merge)
118 118 $ hg -R other log -G
119 119 o 2:24b6387c8c8c draft Nicolas Dumazet <nicdumz.commits@gmail.com> F
120 120 |
121 121 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
122 122 |/
123 123 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
124 124
125 125 $ hg -R other debugobsolete
126 126 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
127 127 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
128 128
129 129 pull empty (with phase movement)
130 130
131 131 $ hg -R main phase --public 24b6387c8c8c
132 132 pre-close-tip:02de42196ebe draft
133 133 postclose-tip:02de42196ebe draft
134 134 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
135 135 $ hg -R other pull -r 24b6387c8c8c
136 136 pulling from $TESTTMP/main (glob)
137 137 no changes found
138 138 pre-close-tip:24b6387c8c8c public
139 139 postclose-tip:24b6387c8c8c public
140 140 txnclose hook: HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
141 141 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
142 142 $ hg -R other log -G
143 143 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
144 144 |
145 145 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
146 146 |/
147 147 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
148 148
149 149 $ hg -R other debugobsolete
150 150 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
151 151 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
152 152
153 153 pull empty
154 154
155 155 $ hg -R other pull -r 24b6387c8c8c
156 156 pulling from $TESTTMP/main (glob)
157 157 no changes found
158 158 pre-close-tip:24b6387c8c8c public
159 159 postclose-tip:24b6387c8c8c public
160 160 txnclose hook: HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
161 161 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
162 162 $ hg -R other log -G
163 163 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
164 164 |
165 165 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
166 166 |/
167 167 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
168 168
169 169 $ hg -R other debugobsolete
170 170 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
171 171 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
172 172
173 173 add extra data to test their exchange during push
174 174
175 175 $ hg -R main bookmark --rev eea13746799a book_eea1
176 176 $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a`
177 177 pre-close-tip:02de42196ebe draft
178 178 postclose-tip:02de42196ebe draft
179 179 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
180 180 $ hg -R main bookmark --rev 02de42196ebe book_02de
181 181 $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe`
182 182 pre-close-tip:02de42196ebe draft book_02de
183 183 postclose-tip:02de42196ebe draft book_02de
184 184 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
185 185 $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc
186 186 $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16`
187 187 pre-close-tip:02de42196ebe draft book_02de
188 188 postclose-tip:02de42196ebe draft book_02de
189 189 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
190 190 $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd
191 191 $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8`
192 192 pre-close-tip:02de42196ebe draft book_02de
193 193 postclose-tip:02de42196ebe draft book_02de
194 194 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
195 195 $ hg -R main bookmark --rev 32af7686d403 book_32af
196 196 $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403`
197 197 pre-close-tip:02de42196ebe draft book_02de
198 198 postclose-tip:02de42196ebe draft book_02de
199 199 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
200 200
201 201 $ hg -R other bookmark --rev cd010b8cd998 book_eea1
202 202 $ hg -R other bookmark --rev cd010b8cd998 book_02de
203 203 $ hg -R other bookmark --rev cd010b8cd998 book_42cc
204 204 $ hg -R other bookmark --rev cd010b8cd998 book_5fdd
205 205 $ hg -R other bookmark --rev cd010b8cd998 book_32af
206 206
207 207 $ hg -R main phase --public eea13746799a
208 208 pre-close-tip:02de42196ebe draft book_02de
209 209 postclose-tip:02de42196ebe draft book_02de
210 210 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
211 211
212 212 push
213 213 $ hg -R main push other --rev eea13746799a --bookmark book_eea1
214 214 pushing to other
215 215 searching for changes
216 216 remote: adding changesets
217 217 remote: adding manifests
218 218 remote: adding file changes
219 219 remote: added 1 changesets with 0 changes to 0 files (-1 heads)
220 220 remote: 1 new obsolescence markers
221 221 remote: pre-close-tip:eea13746799a public book_eea1
222 222 remote: pushkey: lock state after "phases"
223 223 remote: lock: free
224 224 remote: wlock: free
225 225 remote: pushkey: lock state after "bookmarks"
226 226 remote: lock: free
227 227 remote: wlock: free
228 228 remote: postclose-tip:eea13746799a public book_eea1
229 229 remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_TXNID=TXN:* HG_TXNNAME=push HG_URL=push (glob)
230 230 updating bookmark book_eea1
231 231 pre-close-tip:02de42196ebe draft book_02de
232 232 postclose-tip:02de42196ebe draft book_02de
233 233 txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
234 234 file:/*/$TESTTMP/other HG_URL=file:$TESTTMP/other (glob)
235 235 $ hg -R other log -G
236 236 o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
237 237 |\
238 238 | o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
239 239 | |
240 240 @ | 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
241 241 |/
242 242 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de book_32af book_42cc book_5fdd A
243 243
244 244 $ hg -R other debugobsolete
245 245 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
246 246 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
247 247 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
248 248
249 249 pull over ssh
250 250
251 251 $ hg -R other pull ssh://user@dummy/main -r 02de42196ebe --bookmark book_02de
252 252 pulling from ssh://user@dummy/main
253 253 searching for changes
254 254 adding changesets
255 255 adding manifests
256 256 adding file changes
257 257 added 1 changesets with 1 changes to 1 files (+1 heads)
258 258 1 new obsolescence markers
259 259 updating bookmark book_02de
260 260 pre-close-tip:02de42196ebe draft book_02de
261 261 postclose-tip:02de42196ebe draft book_02de
262 262 txnclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
263 263 ssh://user@dummy/main HG_URL=ssh://user@dummy/main
264 264 (run 'hg heads' to see heads, 'hg merge' to merge)
265 265 $ hg -R other debugobsolete
266 266 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
267 267 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
268 268 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
269 269 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
270 270
271 271 pull over http
272 272
273 273 $ hg -R main serve -p $HGPORT -d --pid-file=main.pid -E main-error.log
274 274 $ cat main.pid >> $DAEMON_PIDS
275 275
276 276 $ hg -R other pull http://localhost:$HGPORT/ -r 42ccdea3bb16 --bookmark book_42cc
277 277 pulling from http://localhost:$HGPORT/
278 278 searching for changes
279 279 adding changesets
280 280 adding manifests
281 281 adding file changes
282 282 added 1 changesets with 1 changes to 1 files (+1 heads)
283 283 1 new obsolescence markers
284 284 updating bookmark book_42cc
285 285 pre-close-tip:42ccdea3bb16 draft book_42cc
286 286 postclose-tip:42ccdea3bb16 draft book_42cc
287 287 txnclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
288 288 http://localhost:$HGPORT/ HG_URL=http://localhost:$HGPORT/
289 289 (run 'hg heads .' to see heads, 'hg merge' to merge)
290 290 $ cat main-error.log
291 291 $ hg -R other debugobsolete
292 292 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
293 293 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
294 294 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
295 295 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
296 296 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
297 297
298 298 push over ssh
299 299
300 300 $ hg -R main push ssh://user@dummy/other -r 5fddd98957c8 --bookmark book_5fdd
301 301 pushing to ssh://user@dummy/other
302 302 searching for changes
303 303 remote: adding changesets
304 304 remote: adding manifests
305 305 remote: adding file changes
306 306 remote: added 1 changesets with 1 changes to 1 files
307 307 remote: 1 new obsolescence markers
308 308 remote: pre-close-tip:5fddd98957c8 draft book_5fdd
309 309 remote: pushkey: lock state after "bookmarks"
310 310 remote: lock: free
311 311 remote: wlock: free
312 312 remote: postclose-tip:5fddd98957c8 draft book_5fdd
313 313 remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:ssh:127.0.0.1 (glob)
314 314 updating bookmark book_5fdd
315 315 pre-close-tip:02de42196ebe draft book_02de
316 316 postclose-tip:02de42196ebe draft book_02de
317 317 txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
318 318 ssh://user@dummy/other HG_URL=ssh://user@dummy/other
319 319 $ hg -R other log -G
320 320 o 6:5fddd98957c8 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
321 321 |
322 322 o 5:42ccdea3bb16 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
323 323 |
324 324 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
325 325 | |
326 326 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
327 327 | |/|
328 328 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
329 329 |/ /
330 330 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
331 331 |/
332 332 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af A
333 333
334 334 $ hg -R other debugobsolete
335 335 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
336 336 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
337 337 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
338 338 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
339 339 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
340 340 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
341 341
342 342 push over http
343 343
344 344 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
345 345 $ cat other.pid >> $DAEMON_PIDS
346 346
347 347 $ hg -R main phase --public 32af7686d403
348 348 pre-close-tip:02de42196ebe draft book_02de
349 349 postclose-tip:02de42196ebe draft book_02de
350 350 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
351 351 $ hg -R main push http://localhost:$HGPORT2/ -r 32af7686d403 --bookmark book_32af
352 352 pushing to http://localhost:$HGPORT2/
353 353 searching for changes
354 354 remote: adding changesets
355 355 remote: adding manifests
356 356 remote: adding file changes
357 357 remote: added 1 changesets with 1 changes to 1 files
358 358 remote: 1 new obsolescence markers
359 359 remote: pre-close-tip:32af7686d403 public book_32af
360 360 remote: pushkey: lock state after "phases"
361 361 remote: lock: free
362 362 remote: wlock: free
363 363 remote: pushkey: lock state after "bookmarks"
364 364 remote: lock: free
365 365 remote: wlock: free
366 366 remote: postclose-tip:32af7686d403 public book_32af
367 367 remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=32af7686d403cf45b5d95f2d70cebea587ac806a HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:http:127.0.0.1: (glob)
368 368 updating bookmark book_32af
369 369 pre-close-tip:02de42196ebe draft book_02de
370 370 postclose-tip:02de42196ebe draft book_02de
371 371 txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
372 372 http://localhost:$HGPORT2/ HG_URL=http://localhost:$HGPORT2/
373 373 $ cat other-error.log
374 374
375 375 Check final content.
376 376
377 377 $ hg -R other log -G
378 378 o 7:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af D
379 379 |
380 380 o 6:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
381 381 |
382 382 o 5:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
383 383 |
384 384 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
385 385 | |
386 386 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
387 387 | |/|
388 388 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
389 389 |/ /
390 390 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
391 391 |/
392 392 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
393 393
394 394 $ hg -R other debugobsolete
395 395 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
396 396 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
397 397 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
398 398 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
399 399 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
400 400 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
401 401 7777777777777777777777777777777777777777 32af7686d403cf45b5d95f2d70cebea587ac806a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
402 402
403 403 (check that no 'pending' files remain)
404 404
405 405 $ ls -1 other/.hg/bookmarks*
406 406 other/.hg/bookmarks
407 407 $ ls -1 other/.hg/store/phaseroots*
408 408 other/.hg/store/phaseroots
409 409 $ ls -1 other/.hg/store/00changelog.i*
410 410 other/.hg/store/00changelog.i
411 411
412 412 Error Handling
413 413 ==============
414 414
415 415 Check that errors are properly returned to the client during push.
416 416
417 417 Setting up
418 418
419 419 $ cat > failpush.py << EOF
420 420 > """A small extension that makes push fails when using bundle2
421 421 >
422 422 > used to test error handling in bundle2
423 423 > """
424 424 >
425 425 > from mercurial import util
426 426 > from mercurial import bundle2
427 427 > from mercurial import exchange
428 428 > from mercurial import extensions
429 429 >
430 430 > def _pushbundle2failpart(pushop, bundler):
431 431 > reason = pushop.ui.config('failpush', 'reason', None)
432 432 > part = None
433 433 > if reason == 'abort':
434 434 > bundler.newpart('test:abort')
435 435 > if reason == 'unknown':
436 436 > bundler.newpart('test:unknown')
437 437 > if reason == 'race':
438 438 > # 20 Bytes of crap
439 439 > bundler.newpart('check:heads', data='01234567890123456789')
440 440 >
441 441 > @bundle2.parthandler("test:abort")
442 442 > def handleabort(op, part):
443 443 > raise util.Abort('Abandon ship!', hint="don't panic")
444 444 >
445 445 > def uisetup(ui):
446 446 > exchange.b2partsgenmapping['failpart'] = _pushbundle2failpart
447 447 > exchange.b2partsgenorder.insert(0, 'failpart')
448 448 >
449 449 > EOF
450 450
451 451 $ cd main
452 452 $ hg up tip
453 453 3 files updated, 0 files merged, 1 files removed, 0 files unresolved
454 454 $ echo 'I' > I
455 455 $ hg add I
456 456 $ hg ci -m 'I'
457 457 pre-close-tip:e7ec4e813ba6 draft
458 458 postclose-tip:e7ec4e813ba6 draft
459 459 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
460 460 $ hg id
461 461 e7ec4e813ba6 tip
462 462 $ cd ..
463 463
464 464 $ cat << EOF >> $HGRCPATH
465 465 > [extensions]
466 466 > failpush=$TESTTMP/failpush.py
467 467 > EOF
468 468
469 469 $ killdaemons.py
470 470 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
471 471 $ cat other.pid >> $DAEMON_PIDS
472 472
473 473 Doing the actual push: Abort error
474 474
475 475 $ cat << EOF >> $HGRCPATH
476 476 > [failpush]
477 477 > reason = abort
478 478 > EOF
479 479
480 480 $ hg -R main push other -r e7ec4e813ba6
481 481 pushing to other
482 482 searching for changes
483 483 abort: Abandon ship!
484 484 (don't panic)
485 485 [255]
486 486
487 487 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
488 488 pushing to ssh://user@dummy/other
489 489 searching for changes
490 490 abort: Abandon ship!
491 491 (don't panic)
492 492 [255]
493 493
494 494 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
495 495 pushing to http://localhost:$HGPORT2/
496 496 searching for changes
497 497 abort: Abandon ship!
498 498 (don't panic)
499 499 [255]
500 500
501 501
502 502 Doing the actual push: unknown mandatory parts
503 503
504 504 $ cat << EOF >> $HGRCPATH
505 505 > [failpush]
506 506 > reason = unknown
507 507 > EOF
508 508
509 509 $ hg -R main push other -r e7ec4e813ba6
510 510 pushing to other
511 511 searching for changes
512 512 abort: missing support for test:unknown
513 513 [255]
514 514
515 515 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
516 516 pushing to ssh://user@dummy/other
517 517 searching for changes
518 518 abort: missing support for test:unknown
519 519 [255]
520 520
521 521 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
522 522 pushing to http://localhost:$HGPORT2/
523 523 searching for changes
524 524 abort: missing support for test:unknown
525 525 [255]
526 526
527 527 Doing the actual push: race
528 528
529 529 $ cat << EOF >> $HGRCPATH
530 530 > [failpush]
531 531 > reason = race
532 532 > EOF
533 533
534 534 $ hg -R main push other -r e7ec4e813ba6
535 535 pushing to other
536 536 searching for changes
537 537 abort: push failed:
538 538 'repository changed while pushing - please try again'
539 539 [255]
540 540
541 541 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
542 542 pushing to ssh://user@dummy/other
543 543 searching for changes
544 544 abort: push failed:
545 545 'repository changed while pushing - please try again'
546 546 [255]
547 547
548 548 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
549 549 pushing to http://localhost:$HGPORT2/
550 550 searching for changes
551 551 abort: push failed:
552 552 'repository changed while pushing - please try again'
553 553 [255]
554 554
555 555 Doing the actual push: hook abort
556 556
557 557 $ cat << EOF >> $HGRCPATH
558 558 > [failpush]
559 559 > reason =
560 560 > [hooks]
561 561 > pretxnclose.failpush = sh -c "echo 'You shall not pass!'; false"
562 562 > txnabort.failpush = sh -c "echo 'Cleaning up the mess...'"
563 563 > EOF
564 564
565 565 $ killdaemons.py
566 566 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
567 567 $ cat other.pid >> $DAEMON_PIDS
568 568
569 569 $ hg -R main push other -r e7ec4e813ba6
570 570 pushing to other
571 571 searching for changes
572 572 remote: adding changesets
573 573 remote: adding manifests
574 574 remote: adding file changes
575 575 remote: added 1 changesets with 1 changes to 1 files
576 576 remote: pre-close-tip:e7ec4e813ba6 draft
577 577 remote: You shall not pass!
578 578 remote: transaction abort!
579 579 remote: Cleaning up the mess...
580 580 remote: rollback completed
581 581 abort: pretxnclose.failpush hook exited with status 1
582 582 [255]
583 583
584 584 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
585 585 pushing to ssh://user@dummy/other
586 586 searching for changes
587 587 remote: adding changesets
588 588 remote: adding manifests
589 589 remote: adding file changes
590 590 remote: added 1 changesets with 1 changes to 1 files
591 591 remote: pre-close-tip:e7ec4e813ba6 draft
592 592 remote: You shall not pass!
593 593 remote: transaction abort!
594 594 remote: Cleaning up the mess...
595 595 remote: rollback completed
596 596 abort: pretxnclose.failpush hook exited with status 1
597 597 [255]
598 598
599 599 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
600 600 pushing to http://localhost:$HGPORT2/
601 601 searching for changes
602 602 remote: adding changesets
603 603 remote: adding manifests
604 604 remote: adding file changes
605 605 remote: added 1 changesets with 1 changes to 1 files
606 606 remote: pre-close-tip:e7ec4e813ba6 draft
607 607 remote: You shall not pass!
608 608 remote: transaction abort!
609 609 remote: Cleaning up the mess...
610 610 remote: rollback completed
611 611 abort: pretxnclose.failpush hook exited with status 1
612 612 [255]
613 613
614 614 (check that no 'pending' files remain)
615 615
616 616 $ ls -1 other/.hg/bookmarks*
617 617 other/.hg/bookmarks
618 618 $ ls -1 other/.hg/store/phaseroots*
619 619 other/.hg/store/phaseroots
620 620 $ ls -1 other/.hg/store/00changelog.i*
621 621 other/.hg/store/00changelog.i
622 622
623 623 Check error from hook during the unbundling process itself
624 624
625 625 $ cat << EOF >> $HGRCPATH
626 626 > pretxnchangegroup = sh -c "echo 'Fail early!'; false"
627 627 > EOF
628 628 $ killdaemons.py # reload http config
629 629 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
630 630 $ cat other.pid >> $DAEMON_PIDS
631 631
632 632 $ hg -R main push other -r e7ec4e813ba6
633 633 pushing to other
634 634 searching for changes
635 635 remote: adding changesets
636 636 remote: adding manifests
637 637 remote: adding file changes
638 638 remote: added 1 changesets with 1 changes to 1 files
639 639 remote: Fail early!
640 640 remote: transaction abort!
641 641 remote: Cleaning up the mess...
642 642 remote: rollback completed
643 643 abort: pretxnchangegroup hook exited with status 1
644 644 [255]
645 645 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
646 646 pushing to ssh://user@dummy/other
647 647 searching for changes
648 648 remote: adding changesets
649 649 remote: adding manifests
650 650 remote: adding file changes
651 651 remote: added 1 changesets with 1 changes to 1 files
652 652 remote: Fail early!
653 653 remote: transaction abort!
654 654 remote: Cleaning up the mess...
655 655 remote: rollback completed
656 656 abort: pretxnchangegroup hook exited with status 1
657 657 [255]
658 658 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
659 659 pushing to http://localhost:$HGPORT2/
660 660 searching for changes
661 661 remote: adding changesets
662 662 remote: adding manifests
663 663 remote: adding file changes
664 664 remote: added 1 changesets with 1 changes to 1 files
665 665 remote: Fail early!
666 666 remote: transaction abort!
667 667 remote: Cleaning up the mess...
668 668 remote: rollback completed
669 669 abort: pretxnchangegroup hook exited with status 1
670 670 [255]
671 671
672 672 Check output capture control.
673 673
674 674 (should be still forced for http, disabled for local and ssh)
675 675
676 676 $ cat >> $HGRCPATH << EOF
677 677 > [experimental]
678 678 > bundle2-output-capture=False
679 679 > EOF
680 680
681 681 $ hg -R main push other -r e7ec4e813ba6
682 682 pushing to other
683 683 searching for changes
684 684 adding changesets
685 685 adding manifests
686 686 adding file changes
687 687 added 1 changesets with 1 changes to 1 files
688 688 Fail early!
689 689 transaction abort!
690 690 Cleaning up the mess...
691 691 rollback completed
692 692 abort: pretxnchangegroup hook exited with status 1
693 693 [255]
694 694 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
695 695 pushing to ssh://user@dummy/other
696 696 searching for changes
697 697 remote: adding changesets
698 698 remote: adding manifests
699 699 remote: adding file changes
700 700 remote: added 1 changesets with 1 changes to 1 files
701 701 remote: Fail early!
702 702 remote: transaction abort!
703 703 remote: Cleaning up the mess...
704 704 remote: rollback completed
705 705 abort: pretxnchangegroup hook exited with status 1
706 706 [255]
707 707 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
708 708 pushing to http://localhost:$HGPORT2/
709 709 searching for changes
710 710 remote: adding changesets
711 711 remote: adding manifests
712 712 remote: adding file changes
713 713 remote: added 1 changesets with 1 changes to 1 files
714 714 remote: Fail early!
715 715 remote: transaction abort!
716 716 remote: Cleaning up the mess...
717 717 remote: rollback completed
718 718 abort: pretxnchangegroup hook exited with status 1
719 719 [255]
720 720
721 721 Check abort from mandatory pushkey
722 722
723 723 $ cat > mandatorypart.py << EOF
724 724 > from mercurial import exchange
725 725 > from mercurial import pushkey
726 726 > from mercurial import node
727 > from mercurial import error
727 728 > @exchange.b2partsgenerator('failingpuskey')
728 729 > def addfailingpushey(pushop, bundler):
729 730 > enc = pushkey.encode
730 731 > part = bundler.newpart('pushkey')
731 732 > part.addparam('namespace', enc('phases'))
732 733 > part.addparam('key', enc(pushop.repo['cd010b8cd998'].hex()))
733 734 > part.addparam('old', enc(str(0))) # successful update
734 735 > part.addparam('new', enc(str(0)))
736 > def fail(pushop, exc):
737 > raise error.Abort('Correct phase push failed (because hooks)')
738 > pushop.pkfailcb[part.id] = fail
735 739 > EOF
736 740 $ cat >> $HGRCPATH << EOF
737 741 > [hooks]
738 742 > pretxnchangegroup=
739 743 > pretxnclose.failpush=
740 744 > prepushkey.failpush = sh -c "echo 'do not push the key !'; false"
741 745 > [extensions]
742 746 > mandatorypart=$TESTTMP/mandatorypart.py
743 747 > EOF
744 748 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS # reload http config
745 749 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
746 750 $ cat other.pid >> $DAEMON_PIDS
747 751
748 752 (Failure from a hook)
749 753
750 754 $ hg -R main push other -r e7ec4e813ba6
751 755 pushing to other
752 756 searching for changes
753 757 adding changesets
754 758 adding manifests
755 759 adding file changes
756 760 added 1 changesets with 1 changes to 1 files
757 761 do not push the key !
758 762 pushkey-abort: prepushkey.failpush hook exited with status 1
759 763 transaction abort!
760 764 Cleaning up the mess...
761 765 rollback completed
762 abort: failed to update value for "phases/cd010b8cd998f3981a5a8115f94f8da4ab506089"
766 abort: Correct phase push failed (because hooks)
763 767 [255]
764 768 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
765 769 pushing to ssh://user@dummy/other
766 770 searching for changes
767 771 remote: adding changesets
768 772 remote: adding manifests
769 773 remote: adding file changes
770 774 remote: added 1 changesets with 1 changes to 1 files
771 775 remote: do not push the key !
772 776 remote: pushkey-abort: prepushkey.failpush hook exited with status 1
773 777 remote: transaction abort!
774 778 remote: Cleaning up the mess...
775 779 remote: rollback completed
776 780 abort: failed to update value for "phases/cd010b8cd998f3981a5a8115f94f8da4ab506089"
777 781 [255]
778 782 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
779 783 pushing to http://localhost:$HGPORT2/
780 784 searching for changes
781 785 remote: adding changesets
782 786 remote: adding manifests
783 787 remote: adding file changes
784 788 remote: added 1 changesets with 1 changes to 1 files
785 789 remote: do not push the key !
786 790 remote: pushkey-abort: prepushkey.failpush hook exited with status 1
787 791 remote: transaction abort!
788 792 remote: Cleaning up the mess...
789 793 remote: rollback completed
790 794 abort: failed to update value for "phases/cd010b8cd998f3981a5a8115f94f8da4ab506089"
791 795 [255]
792 796
793 797 (Failure from the pushkey)
794 798
795 799 $ cat > mandatorypart.py << EOF
796 800 > from mercurial import exchange
797 801 > from mercurial import pushkey
798 802 > from mercurial import node
803 > from mercurial import error
799 804 > @exchange.b2partsgenerator('failingpuskey')
800 805 > def addfailingpushey(pushop, bundler):
801 806 > enc = pushkey.encode
802 807 > part = bundler.newpart('pushkey')
803 808 > part.addparam('namespace', enc('phases'))
804 809 > part.addparam('key', enc(pushop.repo['cd010b8cd998'].hex()))
805 810 > part.addparam('old', enc(str(4))) # will fail
806 811 > part.addparam('new', enc(str(3)))
812 > def fail(pushop, exc):
813 > raise error.Abort('Clown phase push failed')
814 > pushop.pkfailcb[part.id] = fail
807 815 > EOF
808 816 $ cat >> $HGRCPATH << EOF
809 817 > [hooks]
810 818 > prepushkey.failpush =
811 819 > EOF
812 820 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS # reload http config
813 821 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
814 822 $ cat other.pid >> $DAEMON_PIDS
815 823
816 824 $ hg -R main push other -r e7ec4e813ba6
817 825 pushing to other
818 826 searching for changes
819 827 adding changesets
820 828 adding manifests
821 829 adding file changes
822 830 added 1 changesets with 1 changes to 1 files
823 831 transaction abort!
824 832 Cleaning up the mess...
825 833 rollback completed
826 834 pushkey: lock state after "phases"
827 835 lock: free
828 836 wlock: free
829 abort: failed to update value for "phases/cd010b8cd998f3981a5a8115f94f8da4ab506089"
837 abort: Clown phase push failed
830 838 [255]
831 839 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
832 840 pushing to ssh://user@dummy/other
833 841 searching for changes
834 842 remote: adding changesets
835 843 remote: adding manifests
836 844 remote: adding file changes
837 845 remote: added 1 changesets with 1 changes to 1 files
838 846 remote: transaction abort!
839 847 remote: Cleaning up the mess...
840 848 remote: rollback completed
841 849 remote: pushkey: lock state after "phases"
842 850 remote: lock: free
843 851 remote: wlock: free
844 852 abort: failed to update value for "phases/cd010b8cd998f3981a5a8115f94f8da4ab506089"
845 853 [255]
846 854 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
847 855 pushing to http://localhost:$HGPORT2/
848 856 searching for changes
849 857 remote: adding changesets
850 858 remote: adding manifests
851 859 remote: adding file changes
852 860 remote: added 1 changesets with 1 changes to 1 files
853 861 remote: transaction abort!
854 862 remote: Cleaning up the mess...
855 863 remote: rollback completed
856 864 remote: pushkey: lock state after "phases"
857 865 remote: lock: free
858 866 remote: wlock: free
859 867 abort: failed to update value for "phases/cd010b8cd998f3981a5a8115f94f8da4ab506089"
860 868 [255]
861 869
General Comments 0
You need to be logged in to leave comments. Login now