bundle2: keep hint close to the primary message when remote abort...
Pierre-Yves David
r30908:4c8dcb49 stable
@@ -1,2006 +1,2008 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from . import (
19 19 base85,
20 20 bookmarks as bookmod,
21 21 bundle2,
22 22 changegroup,
23 23 discovery,
24 24 error,
25 25 lock as lockmod,
26 26 obsolete,
27 27 phases,
28 28 pushkey,
29 29 scmutil,
30 30 sslutil,
31 31 streamclone,
32 32 tags,
33 33 url as urlmod,
34 34 util,
35 35 )
36 36
37 37 urlerr = util.urlerr
38 38 urlreq = util.urlreq
39 39
40 40 # Maps bundle version human names to changegroup versions.
41 41 _bundlespeccgversions = {'v1': '01',
42 42 'v2': '02',
43 43 'packed1': 's1',
44 44 'bundle2': '02', #legacy
45 45 }
46 46
47 47 def parsebundlespec(repo, spec, strict=True, externalnames=False):
48 48 """Parse a bundle string specification into parts.
49 49
50 50 Bundle specifications denote a well-defined bundle/exchange format.
51 51 The content of a given specification should not change over time in
52 52 order to ensure that bundles produced by a newer version of Mercurial are
53 53 readable from an older version.
54 54
55 55 The string currently has the form:
56 56
57 57 <compression>-<type>[;<parameter0>[;<parameter1>]]
58 58
59 59 Where <compression> is one of the supported compression formats
60 60 and <type> is (currently) a version string. A ";" can follow the type and
61 61 all text afterwards is interpreted as URI encoded, ";" delimited key=value
62 62 pairs.
63 63
64 64 If ``strict`` is True (the default) <compression> is required. Otherwise,
65 65 it is optional.
66 66
67 67 If ``externalnames`` is False (the default), the human-centric names will
68 68 be converted to their internal representation.
69 69
70 70 Returns a 3-tuple of (compression, version, parameters). Compression will
71 71 be ``None`` if not in strict mode and a compression isn't defined.
72 72
73 73 An ``InvalidBundleSpecification`` is raised when the specification is
74 74 not syntactically well formed.
75 75
76 76 An ``UnsupportedBundleSpecification`` is raised when the compression or
77 77 bundle type/version is not recognized.
78 78
79 79 Note: this function will likely eventually return a more complex data
80 80 structure, including bundle2 part information.
81 81 """
82 82 def parseparams(s):
83 83 if ';' not in s:
84 84 return s, {}
85 85
86 86 params = {}
87 87 version, paramstr = s.split(';', 1)
88 88
89 89 for p in paramstr.split(';'):
90 90 if '=' not in p:
91 91 raise error.InvalidBundleSpecification(
92 92 _('invalid bundle specification: '
93 93 'missing "=" in parameter: %s') % p)
94 94
95 95 key, value = p.split('=', 1)
96 96 key = urlreq.unquote(key)
97 97 value = urlreq.unquote(value)
98 98 params[key] = value
99 99
100 100 return version, params
101 101
102 102
103 103 if strict and '-' not in spec:
104 104 raise error.InvalidBundleSpecification(
105 105 _('invalid bundle specification; '
106 106 'must be prefixed with compression: %s') % spec)
107 107
108 108 if '-' in spec:
109 109 compression, version = spec.split('-', 1)
110 110
111 111 if compression not in util.compengines.supportedbundlenames:
112 112 raise error.UnsupportedBundleSpecification(
113 113 _('%s compression is not supported') % compression)
114 114
115 115 version, params = parseparams(version)
116 116
117 117 if version not in _bundlespeccgversions:
118 118 raise error.UnsupportedBundleSpecification(
119 119 _('%s is not a recognized bundle version') % version)
120 120 else:
121 121 # Value could be just the compression or just the version, in which
122 122 # case some defaults are assumed (but only when not in strict mode).
123 123 assert not strict
124 124
125 125 spec, params = parseparams(spec)
126 126
127 127 if spec in util.compengines.supportedbundlenames:
128 128 compression = spec
129 129 version = 'v1'
130 130 if 'generaldelta' in repo.requirements:
131 131 version = 'v2'
132 132 elif spec in _bundlespeccgversions:
133 133 if spec == 'packed1':
134 134 compression = 'none'
135 135 else:
136 136 compression = 'bzip2'
137 137 version = spec
138 138 else:
139 139 raise error.UnsupportedBundleSpecification(
140 140 _('%s is not a recognized bundle specification') % spec)
141 141
142 142 # The specification for packed1 can optionally declare the data formats
143 143 # required to apply it. If we see this metadata, compare against what the
144 144 # repo supports and error if the bundle isn't compatible.
145 145 if version == 'packed1' and 'requirements' in params:
146 146 requirements = set(params['requirements'].split(','))
147 147 missingreqs = requirements - repo.supportedformats
148 148 if missingreqs:
149 149 raise error.UnsupportedBundleSpecification(
150 150 _('missing support for repository features: %s') %
151 151 ', '.join(sorted(missingreqs)))
152 152
153 153 if not externalnames:
154 154 engine = util.compengines.forbundlename(compression)
155 155 compression = engine.bundletype()[1]
156 156 version = _bundlespeccgversions[version]
157 157 return compression, version, params
158 158
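# Editorial example (not part of this changeset): how a few specifications
# parse, assuming ``repo`` is any local repository object. Internal names
# are returned because ``externalnames`` defaults to False:
#
#   parsebundlespec(repo, 'gzip-v2')
#   # -> ('GZ', '02', {})
#   parsebundlespec(repo, 'none-packed1;requirements=revlogv1')
#   # -> ('UN', 's1', {'requirements': 'revlogv1'})
#   parsebundlespec(repo, 'v2', strict=False)
#   # -> ('BZ', '02', {})  (compression defaults to bzip2)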
159 159 def readbundle(ui, fh, fname, vfs=None):
160 160 header = changegroup.readexactly(fh, 4)
161 161
162 162 alg = None
163 163 if not fname:
164 164 fname = "stream"
165 165 if not header.startswith('HG') and header.startswith('\0'):
166 166 fh = changegroup.headerlessfixup(fh, header)
167 167 header = "HG10"
168 168 alg = 'UN'
169 169 elif vfs:
170 170 fname = vfs.join(fname)
171 171
172 172 magic, version = header[0:2], header[2:4]
173 173
174 174 if magic != 'HG':
175 175 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
176 176 if version == '10':
177 177 if alg is None:
178 178 alg = changegroup.readexactly(fh, 2)
179 179 return changegroup.cg1unpacker(fh, alg)
180 180 elif version.startswith('2'):
181 181 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
182 182 elif version == 'S1':
183 183 return streamclone.streamcloneapplier(fh)
184 184 else:
185 185 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
186 186
187 187 def getbundlespec(ui, fh):
188 188 """Infer the bundlespec from a bundle file handle.
189 189
190 190 The input file handle is advanced and the original seek position is not
191 191 restored.
192 192 """
193 193 def speccompression(alg):
194 194 try:
195 195 return util.compengines.forbundletype(alg).bundletype()[0]
196 196 except KeyError:
197 197 return None
198 198
199 199 b = readbundle(ui, fh, None)
200 200 if isinstance(b, changegroup.cg1unpacker):
201 201 alg = b._type
202 202 if alg == '_truncatedBZ':
203 203 alg = 'BZ'
204 204 comp = speccompression(alg)
205 205 if not comp:
206 206 raise error.Abort(_('unknown compression algorithm: %s') % alg)
207 207 return '%s-v1' % comp
208 208 elif isinstance(b, bundle2.unbundle20):
209 209 if 'Compression' in b.params:
210 210 comp = speccompression(b.params['Compression'])
211 211 if not comp:
212 212 raise error.Abort(_('unknown compression algorithm: %s') % comp)
213 213 else:
214 214 comp = 'none'
215 215
216 216 version = None
217 217 for part in b.iterparts():
218 218 if part.type == 'changegroup':
219 219 version = part.params['version']
220 220 if version in ('01', '02'):
221 221 version = 'v2'
222 222 else:
223 223 raise error.Abort(_('changegroup version %s does not have '
224 224 'a known bundlespec') % version,
225 225 hint=_('try upgrading your Mercurial '
226 226 'client'))
227 227
228 228 if not version:
229 229 raise error.Abort(_('could not identify changegroup version in '
230 230 'bundle'))
231 231
232 232 return '%s-%s' % (comp, version)
233 233 elif isinstance(b, streamclone.streamcloneapplier):
234 234 requirements = streamclone.readbundle1header(fh)[2]
235 235 params = 'requirements=%s' % ','.join(sorted(requirements))
236 236 return 'none-packed1;%s' % urlreq.quote(params)
237 237 else:
238 238 raise error.Abort(_('unknown bundle type: %s') % b)
239 239
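# Editorial sketch (not part of this changeset): inferring the spec of an
# existing bundle file; ``ui`` is assumed to be a ui instance and
# ``bundle.hg`` a file previously written by ``hg bundle``:
#
#   with open('bundle.hg', 'rb') as fh:
#       spec = getbundlespec(ui, fh)  # e.g. 'bzip2-v2'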
240 240 def buildobsmarkerspart(bundler, markers):
241 241 """add an obsmarker part to the bundler with <markers>
242 242
243 243 No part is created if markers is empty.
244 244 Raises ValueError if the bundler doesn't support any known obsmarker format.
245 245 """
246 246 if markers:
247 247 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
248 248 version = obsolete.commonversion(remoteversions)
249 249 if version is None:
250 250 raise ValueError('bundler does not support common obsmarker format')
251 251 stream = obsolete.encodemarkers(markers, True, version=version)
252 252 return bundler.newpart('obsmarkers', data=stream)
253 253 return None
254 254
255 255 def _computeoutgoing(repo, heads, common):
256 256 """Computes which revs are outgoing given a set of common
257 257 and a set of heads.
258 258
259 259 This is a separate function so extensions can have access to
260 260 the logic.
261 261
262 262 Returns a discovery.outgoing object.
263 263 """
264 264 cl = repo.changelog
265 265 if common:
266 266 hasnode = cl.hasnode
267 267 common = [n for n in common if hasnode(n)]
268 268 else:
269 269 common = [nullid]
270 270 if not heads:
271 271 heads = cl.heads()
272 272 return discovery.outgoing(repo, common, heads)
273 273
274 274 def _forcebundle1(op):
275 275 """return true if a pull/push must use bundle1
276 276
277 277 This function is used to allow testing of the older bundle version"""
278 278 ui = op.repo.ui
279 279 forcebundle1 = False
280 280 # The goal of this config is to allow developers to choose the bundle
281 281 # version used during exchange. This is especially handy during tests.
282 282 # Value is a list of bundle versions to pick from; the highest supported
283 283 # version should be used.
284 284 #
285 285 # developer config: devel.legacy.exchange
286 286 exchange = ui.configlist('devel', 'legacy.exchange')
287 287 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
288 288 return forcebundle1 or not op.remote.capable('bundle2')
289 289
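# Editorial example: the developer config read above, as it would appear in
# an hgrc. Listing only 'bundle1' forces the legacy exchange format:
#
#   [devel]
#   legacy.exchange = bundle1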
290 290 class pushoperation(object):
291 291 """A object that represent a single push operation
292 292
293 293 Its purpose is to carry push related state and very common operations.
294 294
295 295 A new pushoperation should be created at the beginning of each push and
296 296 discarded afterward.
297 297 """
298 298
299 299 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
300 300 bookmarks=()):
301 301 # repo we push from
302 302 self.repo = repo
303 303 self.ui = repo.ui
304 304 # repo we push to
305 305 self.remote = remote
306 306 # force option provided
307 307 self.force = force
308 308 # revs to be pushed (None is "all")
309 309 self.revs = revs
310 310 # bookmarks explicitly pushed
311 311 self.bookmarks = bookmarks
312 312 # allow push of new branch
313 313 self.newbranch = newbranch
314 314 # did a local lock get acquired?
315 315 self.locallocked = None
316 316 # steps already performed
317 317 # (used to check what steps have already been performed through bundle2)
318 318 self.stepsdone = set()
319 319 # Integer version of the changegroup push result
320 320 # - None means nothing to push
321 321 # - 0 means HTTP error
322 322 # - 1 means we pushed and remote head count is unchanged *or*
323 323 # we have outgoing changesets but refused to push
324 324 # - other values as described by addchangegroup()
325 325 self.cgresult = None
326 326 # Boolean value for the bookmark push
327 327 self.bkresult = None
328 328 # discovery.outgoing object (contains common and outgoing data)
329 329 self.outgoing = None
330 330 # all remote heads before the push
331 331 self.remoteheads = None
332 332 # testable as a boolean indicating if any nodes are missing locally.
333 333 self.incoming = None
334 334 # phase changes that must be pushed alongside the changesets
335 335 self.outdatedphases = None
336 336 # phase changes that must be pushed if the changeset push fails
337 337 self.fallbackoutdatedphases = None
338 338 # outgoing obsmarkers
339 339 self.outobsmarkers = set()
340 340 # outgoing bookmarks
341 341 self.outbookmarks = []
342 342 # transaction manager
343 343 self.trmanager = None
344 344 # map { pushkey partid -> callback handling failure}
345 345 # used to handle exception from mandatory pushkey part failure
346 346 self.pkfailcb = {}
347 347
348 348 @util.propertycache
349 349 def futureheads(self):
350 350 """future remote heads if the changeset push succeeds"""
351 351 return self.outgoing.missingheads
352 352
353 353 @util.propertycache
354 354 def fallbackheads(self):
355 355 """future remote heads if the changeset push fails"""
356 356 if self.revs is None:
357 357 # no target to push; all common heads are relevant
358 358 return self.outgoing.commonheads
359 359 unfi = self.repo.unfiltered()
360 360 # I want cheads = heads(::missingheads and ::commonheads)
361 361 # (missingheads is revs with secret changeset filtered out)
362 362 #
363 363 # This can be expressed as:
364 364 # cheads = ( (missingheads and ::commonheads)
365 365 # + (commonheads and ::missingheads))
366 366 # )
367 367 #
368 368 # while trying to push we already computed the following:
369 369 # common = (::commonheads)
370 370 # missing = ((commonheads::missingheads) - commonheads)
371 371 #
372 372 # We can pick:
373 373 # * missingheads part of common (::commonheads)
374 374 common = self.outgoing.common
375 375 nm = self.repo.changelog.nodemap
376 376 cheads = [node for node in self.revs if nm[node] in common]
377 377 # and
378 378 # * commonheads parents on missing
379 379 revset = unfi.set('%ln and parents(roots(%ln))',
380 380 self.outgoing.commonheads,
381 381 self.outgoing.missing)
382 382 cheads.extend(c.node() for c in revset)
383 383 return cheads
384 384
385 385 @property
386 386 def commonheads(self):
387 387 """set of all common heads after changeset bundle push"""
388 388 if self.cgresult:
389 389 return self.futureheads
390 390 else:
391 391 return self.fallbackheads
392 392
393 393 # mapping of messages used when pushing bookmarks
394 394 bookmsgmap = {'update': (_("updating bookmark %s\n"),
395 395 _('updating bookmark %s failed!\n')),
396 396 'export': (_("exporting bookmark %s\n"),
397 397 _('exporting bookmark %s failed!\n')),
398 398 'delete': (_("deleting remote bookmark %s\n"),
399 399 _('deleting remote bookmark %s failed!\n')),
400 400 }
401 401
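# Editorial example: the map above is keyed by action and holds a
# (success, failure) message pair, e.g.:
#
#   bookmsgmap['delete'][0] % 'mybook'
#   # -> 'deleting remote bookmark mybook\n'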
402 402
403 403 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
404 404 opargs=None):
405 405 '''Push outgoing changesets (limited by revs) from a local
406 406 repository to remote. Return an integer:
407 407 - None means nothing to push
408 408 - 0 means HTTP error
409 409 - 1 means we pushed and remote head count is unchanged *or*
410 410 we have outgoing changesets but refused to push
411 411 - other values as described by addchangegroup()
412 412 '''
413 413 if opargs is None:
414 414 opargs = {}
415 415 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
416 416 **opargs)
417 417 if pushop.remote.local():
418 418 missing = (set(pushop.repo.requirements)
419 419 - pushop.remote.local().supported)
420 420 if missing:
421 421 msg = _("required features are not"
422 422 " supported in the destination:"
423 423 " %s") % (', '.join(sorted(missing)))
424 424 raise error.Abort(msg)
425 425
426 426 # there are two ways to push to remote repo:
427 427 #
428 428 # addchangegroup assumes local user can lock remote
429 429 # repo (local filesystem, old ssh servers).
430 430 #
431 431 # unbundle assumes local user cannot lock remote repo (new ssh
432 432 # servers, http servers).
433 433
434 434 if not pushop.remote.canpush():
435 435 raise error.Abort(_("destination does not support push"))
436 436 # get local lock as we might write phase data
437 437 localwlock = locallock = None
438 438 try:
439 439 # bundle2 push may receive a reply bundle touching bookmarks or other
440 440 # things requiring the wlock. Take it now to ensure proper ordering.
441 441 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
442 442 if (not _forcebundle1(pushop)) and maypushback:
443 443 localwlock = pushop.repo.wlock()
444 444 locallock = pushop.repo.lock()
445 445 pushop.locallocked = True
446 446 except IOError as err:
447 447 pushop.locallocked = False
448 448 if err.errno != errno.EACCES:
449 449 raise
450 450 # source repo cannot be locked.
451 451 # We do not abort the push, but just disable the local phase
452 452 # synchronisation.
453 453 msg = 'cannot lock source repository: %s\n' % err
454 454 pushop.ui.debug(msg)
455 455 try:
456 456 if pushop.locallocked:
457 457 pushop.trmanager = transactionmanager(pushop.repo,
458 458 'push-response',
459 459 pushop.remote.url())
460 460 pushop.repo.checkpush(pushop)
461 461 lock = None
462 462 unbundle = pushop.remote.capable('unbundle')
463 463 if not unbundle:
464 464 lock = pushop.remote.lock()
465 465 try:
466 466 _pushdiscovery(pushop)
467 467 if not _forcebundle1(pushop):
468 468 _pushbundle2(pushop)
469 469 _pushchangeset(pushop)
470 470 _pushsyncphase(pushop)
471 471 _pushobsolete(pushop)
472 472 _pushbookmark(pushop)
473 473 finally:
474 474 if lock is not None:
475 475 lock.release()
476 476 if pushop.trmanager:
477 477 pushop.trmanager.close()
478 478 finally:
479 479 if pushop.trmanager:
480 480 pushop.trmanager.release()
481 481 if locallock is not None:
482 482 locallock.release()
483 483 if localwlock is not None:
484 484 localwlock.release()
485 485
486 486 return pushop
487 487
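# Editorial sketch (not part of this changeset): a minimal caller, assuming
# ``repo`` is a local repository and ``other`` a peer obtained elsewhere:
#
#   pushop = push(repo, other, revs=[repo['tip'].node()])
#   if pushop.cgresult == 0:
#       repo.ui.warn('push failed\n')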
488 488 # list of steps to perform discovery before push
489 489 pushdiscoveryorder = []
490 490
491 491 # Mapping between step name and function
492 492 #
493 493 # This exists to help extensions wrap steps if necessary
494 494 pushdiscoverymapping = {}
495 495
496 496 def pushdiscovery(stepname):
497 497 """decorator for function performing discovery before push
498 498
499 499 The function is added to the step -> function mapping and appended to the
500 500 list of steps. Beware that decorated functions will be added in order (this
501 501 may matter).
502 502
503 503 You can only use this decorator for a new step; if you want to wrap a step
504 504 from an extension, change the pushdiscoverymapping dictionary directly."""
505 505 def dec(func):
506 506 assert stepname not in pushdiscoverymapping
507 507 pushdiscoverymapping[stepname] = func
508 508 pushdiscoveryorder.append(stepname)
509 509 return func
510 510 return dec
511 511
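# Editorial example: registering a brand new discovery step with the
# decorator above (the step name 'mystep' is hypothetical):
#
#   @pushdiscovery('mystep')
#   def _pushdiscoverymystep(pushop):
#       pushop.ui.debug('running my discovery step\n')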
512 512 def _pushdiscovery(pushop):
513 513 """Run all discovery steps"""
514 514 for stepname in pushdiscoveryorder:
515 515 step = pushdiscoverymapping[stepname]
516 516 step(pushop)
517 517
518 518 @pushdiscovery('changeset')
519 519 def _pushdiscoverychangeset(pushop):
520 520 """discover the changeset that need to be pushed"""
521 521 fci = discovery.findcommonincoming
522 522 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
523 523 common, inc, remoteheads = commoninc
524 524 fco = discovery.findcommonoutgoing
525 525 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
526 526 commoninc=commoninc, force=pushop.force)
527 527 pushop.outgoing = outgoing
528 528 pushop.remoteheads = remoteheads
529 529 pushop.incoming = inc
530 530
531 531 @pushdiscovery('phase')
532 532 def _pushdiscoveryphase(pushop):
533 533 """discover the phase that needs to be pushed
534 534
535 535 (computed for both success and failure case for changesets push)"""
536 536 outgoing = pushop.outgoing
537 537 unfi = pushop.repo.unfiltered()
538 538 remotephases = pushop.remote.listkeys('phases')
539 539 publishing = remotephases.get('publishing', False)
540 540 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
541 541 and remotephases # server supports phases
542 542 and not pushop.outgoing.missing # no changesets to be pushed
543 543 and publishing):
544 544 # When:
545 545 # - this is a subrepo push
546 546 # - and the remote supports phases
547 547 # - and no changesets are to be pushed
548 548 # - and the remote is publishing
549 549 # We may be in the issue 3871 case!
550 550 # We drop the possible phase synchronisation, done as a courtesy,
551 551 # since it could publish changesets that are possibly still draft
552 552 # on the remote.
553 553 remotephases = {'publishing': 'True'}
554 554 ana = phases.analyzeremotephases(pushop.repo,
555 555 pushop.fallbackheads,
556 556 remotephases)
557 557 pheads, droots = ana
558 558 extracond = ''
559 559 if not publishing:
560 560 extracond = ' and public()'
561 561 revset = 'heads((%%ln::%%ln) %s)' % extracond
562 562 # Get the list of all revs that are draft on the remote but public here.
563 563 # XXX Beware that the revset breaks if droots is not strictly a set of
564 564 # XXX roots; we may want to ensure it is, but that is costly
565 565 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
566 566 if not outgoing.missing:
567 567 future = fallback
568 568 else:
569 569 # add the changesets we are going to push as draft
570 570 #
571 571 # should not be necessary for a publishing server, but because of an
572 572 # issue fixed in xxxxx we have to do it anyway.
573 573 fdroots = list(unfi.set('roots(%ln + %ln::)',
574 574 outgoing.missing, droots))
575 575 fdroots = [f.node() for f in fdroots]
576 576 future = list(unfi.set(revset, fdroots, pushop.futureheads))
577 577 pushop.outdatedphases = future
578 578 pushop.fallbackoutdatedphases = fallback
579 579
580 580 @pushdiscovery('obsmarker')
581 581 def _pushdiscoveryobsmarkers(pushop):
582 582 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
583 583 and pushop.repo.obsstore
584 584 and 'obsolete' in pushop.remote.listkeys('namespaces')):
585 585 repo = pushop.repo
586 586 # very naive computation that can be quite expensive on big repos.
587 587 # However, evolution is currently slow on them anyway.
588 588 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
589 589 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
590 590
591 591 @pushdiscovery('bookmarks')
592 592 def _pushdiscoverybookmarks(pushop):
593 593 ui = pushop.ui
594 594 repo = pushop.repo.unfiltered()
595 595 remote = pushop.remote
596 596 ui.debug("checking for updated bookmarks\n")
597 597 ancestors = ()
598 598 if pushop.revs:
599 599 revnums = map(repo.changelog.rev, pushop.revs)
600 600 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
601 601 remotebookmark = remote.listkeys('bookmarks')
602 602
603 603 explicit = set([repo._bookmarks.expandname(bookmark)
604 604 for bookmark in pushop.bookmarks])
605 605
606 606 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
607 607 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
608 608
609 609 def safehex(x):
610 610 if x is None:
611 611 return x
612 612 return hex(x)
613 613
614 614 def hexifycompbookmarks(bookmarks):
615 615 for b, scid, dcid in bookmarks:
616 616 yield b, safehex(scid), safehex(dcid)
617 617
618 618 comp = [hexifycompbookmarks(marks) for marks in comp]
619 619 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
620 620
621 621 for b, scid, dcid in advsrc:
622 622 if b in explicit:
623 623 explicit.remove(b)
624 624 if not ancestors or repo[scid].rev() in ancestors:
625 625 pushop.outbookmarks.append((b, dcid, scid))
626 626 # search for added bookmarks
627 627 for b, scid, dcid in addsrc:
628 628 if b in explicit:
629 629 explicit.remove(b)
630 630 pushop.outbookmarks.append((b, '', scid))
631 631 # search for overwritten bookmarks
632 632 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
633 633 if b in explicit:
634 634 explicit.remove(b)
635 635 pushop.outbookmarks.append((b, dcid, scid))
636 636 # search for bookmarks to delete
637 637 for b, scid, dcid in adddst:
638 638 if b in explicit:
639 639 explicit.remove(b)
640 640 # treat as "deleted locally"
641 641 pushop.outbookmarks.append((b, dcid, ''))
642 642 # identical bookmarks shouldn't get reported
643 643 for b, scid, dcid in same:
644 644 if b in explicit:
645 645 explicit.remove(b)
646 646
647 647 if explicit:
648 648 explicit = sorted(explicit)
649 649 # we should probably list all of them
650 650 ui.warn(_('bookmark %s does not exist on the local '
651 651 'or remote repository!\n') % explicit[0])
652 652 pushop.bkresult = 2
653 653
654 654 pushop.outbookmarks.sort()
655 655
656 656 def _pushcheckoutgoing(pushop):
657 657 outgoing = pushop.outgoing
658 658 unfi = pushop.repo.unfiltered()
659 659 if not outgoing.missing:
660 660 # nothing to push
661 661 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
662 662 return False
663 663 # something to push
664 664 if not pushop.force:
665 665 # if repo.obsstore is empty --> no obsolete markers;
666 666 # we then save the iteration
667 667 if unfi.obsstore:
668 668 # these messages are defined here to stay within the 80-char limit
669 669 mso = _("push includes obsolete changeset: %s!")
670 670 mst = {"unstable": _("push includes unstable changeset: %s!"),
671 671 "bumped": _("push includes bumped changeset: %s!"),
672 672 "divergent": _("push includes divergent changeset: %s!")}
673 673 # If we are pushing and there is at least one
674 674 # obsolete or unstable changeset in missing, at
675 675 # least one of the missing heads will be obsolete or
676 676 # unstable. So checking only the heads is ok
677 677 for node in outgoing.missingheads:
678 678 ctx = unfi[node]
679 679 if ctx.obsolete():
680 680 raise error.Abort(mso % ctx)
681 681 elif ctx.troubled():
682 682 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
683 683
684 684 discovery.checkheads(pushop)
685 685 return True
686 686
687 687 # List of names of steps to perform for an outgoing bundle2, order matters.
688 688 b2partsgenorder = []
689 689
690 690 # Mapping between step name and function
691 691 #
692 692 # This exists to help extensions wrap steps if necessary
693 693 b2partsgenmapping = {}
694 694
695 695 def b2partsgenerator(stepname, idx=None):
696 696 """decorator for function generating bundle2 part
697 697
698 698 The function is added to the step -> function mapping and appended to the
699 699 list of steps. Beware that decorated functions will be added in order
700 700 (this may matter).
701 701
702 702 You can only use this decorator for new steps; if you want to wrap a step
703 703 from an extension, change the b2partsgenmapping dictionary directly."""
704 704 def dec(func):
705 705 assert stepname not in b2partsgenmapping
706 706 b2partsgenmapping[stepname] = func
707 707 if idx is None:
708 708 b2partsgenorder.append(stepname)
709 709 else:
710 710 b2partsgenorder.insert(idx, stepname)
711 711 return func
712 712 return dec
713 713
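# Editorial example: registering a new part generator; ``idx=0`` makes the
# hypothetical 'mypart' step run before the existing ones:
#
#   @b2partsgenerator('mypart', idx=0)
#   def _pushb2mypart(pushop, bundler):
#       if 'mypart' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('mypart')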
714 714 def _pushb2ctxcheckheads(pushop, bundler):
715 715 """Generate race condition checking parts
716 716
717 717 Exists as an independent function to aid extensions
718 718 """
719 719 if not pushop.force:
720 720 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
721 721
722 722 @b2partsgenerator('changeset')
723 723 def _pushb2ctx(pushop, bundler):
724 724 """handle changegroup push through bundle2
725 725
726 726 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
727 727 """
728 728 if 'changesets' in pushop.stepsdone:
729 729 return
730 730 pushop.stepsdone.add('changesets')
731 731 # Send known heads to the server for race detection.
732 732 if not _pushcheckoutgoing(pushop):
733 733 return
734 734 pushop.repo.prepushoutgoinghooks(pushop)
735 735
736 736 _pushb2ctxcheckheads(pushop, bundler)
737 737
738 738 b2caps = bundle2.bundle2caps(pushop.remote)
739 739 version = '01'
740 740 cgversions = b2caps.get('changegroup')
741 741 if cgversions: # 3.1 and 3.2 ship with an empty value
742 742 cgversions = [v for v in cgversions
743 743 if v in changegroup.supportedoutgoingversions(
744 744 pushop.repo)]
745 745 if not cgversions:
746 746 raise ValueError(_('no common changegroup version'))
747 747 version = max(cgversions)
748 748 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
749 749 pushop.outgoing,
750 750 version=version)
751 751 cgpart = bundler.newpart('changegroup', data=cg)
752 752 if cgversions:
753 753 cgpart.addparam('version', version)
754 754 if 'treemanifest' in pushop.repo.requirements:
755 755 cgpart.addparam('treemanifest', '1')
756 756 def handlereply(op):
757 757 """extract addchangegroup returns from server reply"""
758 758 cgreplies = op.records.getreplies(cgpart.id)
759 759 assert len(cgreplies['changegroup']) == 1
760 760 pushop.cgresult = cgreplies['changegroup'][0]['return']
761 761 return handlereply
762 762
763 763 @b2partsgenerator('phase')
764 764 def _pushb2phases(pushop, bundler):
765 765 """handle phase push through bundle2"""
766 766 if 'phases' in pushop.stepsdone:
767 767 return
768 768 b2caps = bundle2.bundle2caps(pushop.remote)
769 769 if 'pushkey' not in b2caps:
770 770 return
771 771 pushop.stepsdone.add('phases')
772 772 part2node = []
773 773
774 774 def handlefailure(pushop, exc):
775 775 targetid = int(exc.partid)
776 776 for partid, node in part2node:
777 777 if partid == targetid:
778 778 raise error.Abort(_('updating %s to public failed') % node)
779 779
780 780 enc = pushkey.encode
781 781 for newremotehead in pushop.outdatedphases:
782 782 part = bundler.newpart('pushkey')
783 783 part.addparam('namespace', enc('phases'))
784 784 part.addparam('key', enc(newremotehead.hex()))
785 785 part.addparam('old', enc(str(phases.draft)))
786 786 part.addparam('new', enc(str(phases.public)))
787 787 part2node.append((part.id, newremotehead))
788 788 pushop.pkfailcb[part.id] = handlefailure
789 789
790 790 def handlereply(op):
791 791 for partid, node in part2node:
792 792 partrep = op.records.getreplies(partid)
793 793 results = partrep['pushkey']
794 794 assert len(results) <= 1
795 795 msg = None
796 796 if not results:
797 797 msg = _('server ignored update of %s to public!\n') % node
798 798 elif not int(results[0]['return']):
799 799 msg = _('updating %s to public failed!\n') % node
800 800 if msg is not None:
801 801 pushop.ui.warn(msg)
802 802 return handlereply
803 803
804 804 @b2partsgenerator('obsmarkers')
805 805 def _pushb2obsmarkers(pushop, bundler):
806 806 if 'obsmarkers' in pushop.stepsdone:
807 807 return
808 808 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
809 809 if obsolete.commonversion(remoteversions) is None:
810 810 return
811 811 pushop.stepsdone.add('obsmarkers')
812 812 if pushop.outobsmarkers:
813 813 markers = sorted(pushop.outobsmarkers)
814 814 buildobsmarkerspart(bundler, markers)
815 815
816 816 @b2partsgenerator('bookmarks')
817 817 def _pushb2bookmarks(pushop, bundler):
818 818 """handle bookmark push through bundle2"""
819 819 if 'bookmarks' in pushop.stepsdone:
820 820 return
821 821 b2caps = bundle2.bundle2caps(pushop.remote)
822 822 if 'pushkey' not in b2caps:
823 823 return
824 824 pushop.stepsdone.add('bookmarks')
825 825 part2book = []
826 826 enc = pushkey.encode
827 827
828 828 def handlefailure(pushop, exc):
829 829 targetid = int(exc.partid)
830 830 for partid, book, action in part2book:
831 831 if partid == targetid:
832 832 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
833 833 # we should not be called for parts we did not generate
834 834 assert False
835 835
836 836 for book, old, new in pushop.outbookmarks:
837 837 part = bundler.newpart('pushkey')
838 838 part.addparam('namespace', enc('bookmarks'))
839 839 part.addparam('key', enc(book))
840 840 part.addparam('old', enc(old))
841 841 part.addparam('new', enc(new))
842 842 action = 'update'
843 843 if not old:
844 844 action = 'export'
845 845 elif not new:
846 846 action = 'delete'
847 847 part2book.append((part.id, book, action))
848 848 pushop.pkfailcb[part.id] = handlefailure
849 849
850 850 def handlereply(op):
851 851 ui = pushop.ui
852 852 for partid, book, action in part2book:
853 853 partrep = op.records.getreplies(partid)
854 854 results = partrep['pushkey']
855 855 assert len(results) <= 1
856 856 if not results:
857 857 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
858 858 else:
859 859 ret = int(results[0]['return'])
860 860 if ret:
861 861 ui.status(bookmsgmap[action][0] % book)
862 862 else:
863 863 ui.warn(bookmsgmap[action][1] % book)
864 864 if pushop.bkresult is not None:
865 865 pushop.bkresult = 1
866 866 return handlereply
867 867
868 868
869 869 def _pushbundle2(pushop):
870 870 """push data to the remote using bundle2
871 871
872 872 The only currently supported type of data is changegroup but this will
873 873 evolve in the future."""
874 874 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
875 875 pushback = (pushop.trmanager
876 876 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
877 877
878 878 # create reply capability
879 879 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
880 880 allowpushback=pushback))
881 881 bundler.newpart('replycaps', data=capsblob)
882 882 replyhandlers = []
883 883 for partgenname in b2partsgenorder:
884 884 partgen = b2partsgenmapping[partgenname]
885 885 ret = partgen(pushop, bundler)
886 886 if callable(ret):
887 887 replyhandlers.append(ret)
888 888 # do not push if nothing to push
889 889 if bundler.nbparts <= 1:
890 890 return
891 891 stream = util.chunkbuffer(bundler.getchunks())
892 892 try:
893 893 try:
894 894 reply = pushop.remote.unbundle(
895 895 stream, ['force'], pushop.remote.url())
896 896 except error.BundleValueError as exc:
897 897 raise error.Abort(_('missing support for %s') % exc)
898 898 try:
899 899 trgetter = None
900 900 if pushback:
901 901 trgetter = pushop.trmanager.transaction
902 902 op = bundle2.processbundle(pushop.repo, reply, trgetter)
903 903 except error.BundleValueError as exc:
904 904 raise error.Abort(_('missing support for %s') % exc)
905 905 except bundle2.AbortFromPart as exc:
906 906 pushop.ui.status(_('remote: %s\n') % exc)
907 raise error.Abort(_('push failed on remote'), hint=exc.hint)
907 if exc.hint is not None:
908 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
909 raise error.Abort(_('push failed on remote'))
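# Editorial note on the change above: the hint is now echoed with the
# "remote: " prefix, immediately after the server's primary message,
# instead of riding on the local abort. A failed push thus reads, e.g.:
#
#   remote: incompatible Mercurial client; bundle2 required
#   remote: (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
#   abort: push failed on remote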
908 910 except error.PushkeyFailed as exc:
909 911 partid = int(exc.partid)
910 912 if partid not in pushop.pkfailcb:
911 913 raise
912 914 pushop.pkfailcb[partid](pushop, exc)
913 915 for rephand in replyhandlers:
914 916 rephand(op)
915 917
916 918 def _pushchangeset(pushop):
917 919 """Make the actual push of changeset bundle to remote repo"""
918 920 if 'changesets' in pushop.stepsdone:
919 921 return
920 922 pushop.stepsdone.add('changesets')
921 923 if not _pushcheckoutgoing(pushop):
922 924 return
923 925 pushop.repo.prepushoutgoinghooks(pushop)
924 926 outgoing = pushop.outgoing
925 927 unbundle = pushop.remote.capable('unbundle')
926 928 # TODO: get bundlecaps from remote
927 929 bundlecaps = None
928 930 # create a changegroup from local
929 931 if pushop.revs is None and not (outgoing.excluded
930 932 or pushop.repo.changelog.filteredrevs):
931 933 # push everything,
932 934 # use the fast path, no race possible on push
933 935 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
934 936 cg = changegroup.getsubset(pushop.repo,
935 937 outgoing,
936 938 bundler,
937 939 'push',
938 940 fastpath=True)
939 941 else:
940 942 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
941 943 bundlecaps)
942 944
943 945 # apply changegroup to remote
944 946 if unbundle:
945 947 # local repo finds heads on server, finds out what
946 948 # revs it must push. once revs transferred, if server
947 949 # finds it has different heads (someone else won
948 950 # commit/push race), server aborts.
949 951 if pushop.force:
950 952 remoteheads = ['force']
951 953 else:
952 954 remoteheads = pushop.remoteheads
953 955 # ssh: return remote's addchangegroup()
954 956 # http: return remote's addchangegroup() or 0 for error
955 957 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
956 958 pushop.repo.url())
957 959 else:
958 960 # we return an integer indicating remote head count
959 961 # change
960 962 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
961 963 pushop.repo.url())
962 964
963 965 def _pushsyncphase(pushop):
964 966 """synchronise phase information locally and remotely"""
965 967 cheads = pushop.commonheads
966 968 # even when we don't push, exchanging phase data is useful
967 969 remotephases = pushop.remote.listkeys('phases')
968 970 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
969 971 and remotephases # server supports phases
970 972 and pushop.cgresult is None # nothing was pushed
971 973 and remotephases.get('publishing', False)):
972 974 # When:
973 975 # - this is a subrepo push
974 976 # - and the remote supports phases
975 977 # - and no changeset was pushed
976 978 # - and the remote is publishing
977 979 # We may be in the issue 3871 case!
978 980 # We drop the possible phase synchronisation, done as a courtesy,
979 981 # since it could publish changesets that are possibly still draft
980 982 # on the remote.
981 983 remotephases = {'publishing': 'True'}
982 984 if not remotephases: # old server, or public-only reply from non-publishing
983 985 _localphasemove(pushop, cheads)
984 986 # don't push any phase data as there is nothing to push
985 987 else:
986 988 ana = phases.analyzeremotephases(pushop.repo, cheads,
987 989 remotephases)
988 990 pheads, droots = ana
989 991 ### Apply remote phase on local
990 992 if remotephases.get('publishing', False):
991 993 _localphasemove(pushop, cheads)
992 994 else: # publish = False
993 995 _localphasemove(pushop, pheads)
994 996 _localphasemove(pushop, cheads, phases.draft)
995 997 ### Apply local phase on remote
996 998
997 999 if pushop.cgresult:
998 1000 if 'phases' in pushop.stepsdone:
999 1001 # phases already pushed through bundle2
1000 1002 return
1001 1003 outdated = pushop.outdatedphases
1002 1004 else:
1003 1005 outdated = pushop.fallbackoutdatedphases
1004 1006
1005 1007 pushop.stepsdone.add('phases')
1006 1008
1007 1009 # filter heads already turned public by the push
1008 1010 outdated = [c for c in outdated if c.node() not in pheads]
1009 1011 # fallback to independent pushkey command
1010 1012 for newremotehead in outdated:
1011 1013 r = pushop.remote.pushkey('phases',
1012 1014 newremotehead.hex(),
1013 1015 str(phases.draft),
1014 1016 str(phases.public))
1015 1017 if not r:
1016 1018 pushop.ui.warn(_('updating %s to public failed!\n')
1017 1019 % newremotehead)
1018 1020
1019 1021 def _localphasemove(pushop, nodes, phase=phases.public):
1020 1022 """move <nodes> to <phase> in the local source repo"""
1021 1023 if pushop.trmanager:
1022 1024 phases.advanceboundary(pushop.repo,
1023 1025 pushop.trmanager.transaction(),
1024 1026 phase,
1025 1027 nodes)
1026 1028 else:
1027 1029 # repo is not locked, do not change any phases!
1028 1030 # Inform the user that phases should have been moved when
1029 1031 # applicable.
1030 1032 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1031 1033 phasestr = phases.phasenames[phase]
1032 1034 if actualmoves:
1033 1035 pushop.ui.status(_('cannot lock source repo, skipping '
1034 1036 'local %s phase update\n') % phasestr)
1035 1037
1036 1038 def _pushobsolete(pushop):
1037 1039 """utility function to push obsolete markers to a remote"""
1038 1040 if 'obsmarkers' in pushop.stepsdone:
1039 1041 return
1040 1042 repo = pushop.repo
1041 1043 remote = pushop.remote
1042 1044 pushop.stepsdone.add('obsmarkers')
1043 1045 if pushop.outobsmarkers:
1044 1046 pushop.ui.debug('try to push obsolete markers to remote\n')
1045 1047 rslts = []
1046 1048 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1047 1049 for key in sorted(remotedata, reverse=True):
1048 1050 # reverse sort to ensure we end with dump0
1049 1051 data = remotedata[key]
1050 1052 rslts.append(remote.pushkey('obsolete', key, '', data))
1051 1053 if [r for r in rslts if not r]:
1052 1054 msg = _('failed to push some obsolete markers!\n')
1053 1055 repo.ui.warn(msg)
1054 1056
1055 1057 def _pushbookmark(pushop):
1056 1058 """Update bookmark position on remote"""
1057 1059 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1058 1060 return
1059 1061 pushop.stepsdone.add('bookmarks')
1060 1062 ui = pushop.ui
1061 1063 remote = pushop.remote
1062 1064
1063 1065 for b, old, new in pushop.outbookmarks:
1064 1066 action = 'update'
1065 1067 if not old:
1066 1068 action = 'export'
1067 1069 elif not new:
1068 1070 action = 'delete'
1069 1071 if remote.pushkey('bookmarks', b, old, new):
1070 1072 ui.status(bookmsgmap[action][0] % b)
1071 1073 else:
1072 1074 ui.warn(bookmsgmap[action][1] % b)
1073 1075 # discovery may have set the value from an invalid entry
1074 1076 if pushop.bkresult is not None:
1075 1077 pushop.bkresult = 1
1076 1078
1077 1079 class pulloperation(object):
1078 1080 """A object that represent a single pull operation
1079 1081
1080 1082 It purpose is to carry pull related state and very common operation.
1081 1083
1082 1084 A new should be created at the beginning of each pull and discarded
1083 1085 afterward.
1084 1086 """
1085 1087
1086 1088 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1087 1089 remotebookmarks=None, streamclonerequested=None):
1088 1090 # repo we pull into
1089 1091 self.repo = repo
1090 1092 # repo we pull from
1091 1093 self.remote = remote
1092 1094 # revision we try to pull (None is "all")
1093 1095 self.heads = heads
1094 1096 # bookmarks pulled explicitly
1095 1097 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1096 1098 for bookmark in bookmarks]
1097 1099 # do we force pull?
1098 1100 self.force = force
1099 1101 # whether a streaming clone was requested
1100 1102 self.streamclonerequested = streamclonerequested
1101 1103 # transaction manager
1102 1104 self.trmanager = None
1103 1105 # set of changesets common to local and remote before the pull
1104 1106 self.common = None
1105 1107 # set of pulled heads
1106 1108 self.rheads = None
1107 1109 # list of missing changesets to fetch remotely
1108 1110 self.fetch = None
1109 1111 # remote bookmarks data
1110 1112 self.remotebookmarks = remotebookmarks
1111 1113 # result of changegroup pulling (used as return code by pull)
1112 1114 self.cgresult = None
1113 1115 # list of steps already done
1114 1116 self.stepsdone = set()
1115 1117 # Whether we attempted a clone from pre-generated bundles.
1116 1118 self.clonebundleattempted = False
1117 1119
1118 1120 @util.propertycache
1119 1121 def pulledsubset(self):
1120 1122 """heads of the set of changeset target by the pull"""
1121 1123 # compute target subset
1122 1124 if self.heads is None:
1123 1125 # We pulled everything possible
1124 1126 # sync on everything common
1125 1127 c = set(self.common)
1126 1128 ret = list(self.common)
1127 1129 for n in self.rheads:
1128 1130 if n not in c:
1129 1131 ret.append(n)
1130 1132 return ret
1131 1133 else:
1132 1134 # We pulled a specific subset
1133 1135 # sync on this subset
1134 1136 return self.heads
1135 1137
1136 1138 @util.propertycache
1137 1139 def canusebundle2(self):
1138 1140 return not _forcebundle1(self)
1139 1141
1140 1142 @util.propertycache
1141 1143 def remotebundle2caps(self):
1142 1144 return bundle2.bundle2caps(self.remote)
1143 1145
1144 1146 def gettransaction(self):
1145 1147 # deprecated; talk to trmanager directly
1146 1148 return self.trmanager.transaction()
1147 1149
1148 1150 class transactionmanager(object):
1149 1151 """An object to manage the life cycle of a transaction
1150 1152
1151 1153 It creates the transaction on demand and calls the appropriate hooks when
1152 1154 closing the transaction."""
1153 1155 def __init__(self, repo, source, url):
1154 1156 self.repo = repo
1155 1157 self.source = source
1156 1158 self.url = url
1157 1159 self._tr = None
1158 1160
1159 1161 def transaction(self):
1160 1162 """Return an open transaction object, constructing if necessary"""
1161 1163 if not self._tr:
1162 1164 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1163 1165 self._tr = self.repo.transaction(trname)
1164 1166 self._tr.hookargs['source'] = self.source
1165 1167 self._tr.hookargs['url'] = self.url
1166 1168 return self._tr
1167 1169
1168 1170 def close(self):
1169 1171 """close transaction if created"""
1170 1172 if self._tr is not None:
1171 1173 self._tr.close()
1172 1174
1173 1175 def release(self):
1174 1176 """release transaction if created"""
1175 1177 if self._tr is not None:
1176 1178 self._tr.release()
1177 1179
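# Editorial sketch: the typical life cycle of the manager above, mirroring
# how push/pull drive it (``repo`` and ``remote`` assumed to exist):
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   try:
#       tr = trmanager.transaction()  # created on first use
#       # ... apply incoming data under the transaction ...
#       trmanager.close()             # commit
#   finally:
#       trmanager.release()           # rollback unless close() ran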
1178 1180 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1179 1181 streamclonerequested=None):
1180 1182 """Fetch repository data from a remote.
1181 1183
1182 1184 This is the main function used to retrieve data from a remote repository.
1183 1185
1184 1186 ``repo`` is the local repository to clone into.
1185 1187 ``remote`` is a peer instance.
1186 1188 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1187 1189 default) means to pull everything from the remote.
1188 1190 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1189 1191 default, all remote bookmarks are pulled.
1190 1192 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1191 1193 initialization.
1192 1194 ``streamclonerequested`` is a boolean indicating whether a "streaming
1193 1195 clone" is requested. A "streaming clone" is essentially a raw file copy
1194 1196 of revlogs from the server. This only works when the local repository is
1195 1197 empty. The default value of ``None`` means to respect the server
1196 1198 configuration for preferring stream clones.
1197 1199
1198 1200 Returns the ``pulloperation`` created for this pull.
1199 1201 """
1200 1202 if opargs is None:
1201 1203 opargs = {}
1202 1204 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1203 1205 streamclonerequested=streamclonerequested, **opargs)
1204 1206 if pullop.remote.local():
1205 1207 missing = set(pullop.remote.requirements) - pullop.repo.supported
1206 1208 if missing:
1207 1209 msg = _("required features are not"
1208 1210 " supported in the destination:"
1209 1211 " %s") % (', '.join(sorted(missing)))
1210 1212 raise error.Abort(msg)
1211 1213
1212 1214 wlock = lock = None
1213 1215 try:
1214 1216 wlock = pullop.repo.wlock()
1215 1217 lock = pullop.repo.lock()
1216 1218 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1217 1219 streamclone.maybeperformlegacystreamclone(pullop)
1218 1220 # This should ideally be in _pullbundle2(). However, it needs to run
1219 1221 # before discovery to avoid extra work.
1220 1222 _maybeapplyclonebundle(pullop)
1221 1223 _pulldiscovery(pullop)
1222 1224 if pullop.canusebundle2:
1223 1225 _pullbundle2(pullop)
1224 1226 _pullchangeset(pullop)
1225 1227 _pullphase(pullop)
1226 1228 _pullbookmarks(pullop)
1227 1229 _pullobsolete(pullop)
1228 1230 pullop.trmanager.close()
1229 1231 finally:
1230 1232 lockmod.release(pullop.trmanager, lock, wlock)
1231 1233
1232 1234 return pullop
1233 1235
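# Editorial sketch (not part of this changeset): a minimal caller, assuming
# ``repo`` and a peer ``other`` exist:
#
#   pullop = pull(repo, other)        # heads=None pulls everything
#   if pullop.cgresult == 0:
#       repo.ui.status('nothing was added\n')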
1234 1236 # list of steps to perform discovery before pull
1235 1237 pulldiscoveryorder = []
1236 1238
1237 1239 # Mapping between step name and function
1238 1240 #
1239 1241 # This exists to help extensions wrap steps if necessary
1240 1242 pulldiscoverymapping = {}
1241 1243
1242 1244 def pulldiscovery(stepname):
1243 1245 """decorator for function performing discovery before pull
1244 1246
1245 1247 The function is added to the step -> function mapping and appended to the
1246 1248 list of steps. Beware that decorated functions will be added in order (this
1247 1249 may matter).
1248 1250
1249 1251 You can only use this decorator for a new step; if you want to wrap a step
1250 1252 from an extension, change the pulldiscoverymapping dictionary directly."""
1251 1253 def dec(func):
1252 1254 assert stepname not in pulldiscoverymapping
1253 1255 pulldiscoverymapping[stepname] = func
1254 1256 pulldiscoveryorder.append(stepname)
1255 1257 return func
1256 1258 return dec
1257 1259
1258 1260 def _pulldiscovery(pullop):
1259 1261 """Run all discovery steps"""
1260 1262 for stepname in pulldiscoveryorder:
1261 1263 step = pulldiscoverymapping[stepname]
1262 1264 step(pullop)
1263 1265
1264 1266 @pulldiscovery('b1:bookmarks')
1265 1267 def _pullbookmarkbundle1(pullop):
1266 1268 """fetch bookmark data in bundle1 case
1267 1269
1268 1270 If not using bundle2, we have to fetch bookmarks before changeset
1269 1271 discovery to reduce the chance and impact of race conditions."""
1270 1272 if pullop.remotebookmarks is not None:
1271 1273 return
1272 1274 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1273 1275 # all known bundle2 servers now support listkeys, but let's be nice with
1274 1276 # new implementations.
1275 1277 return
1276 1278 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1277 1279
1278 1280
1279 1281 @pulldiscovery('changegroup')
1280 1282 def _pulldiscoverychangegroup(pullop):
1281 1283 """discovery phase for the pull
1282 1284
1283 1285 Currently handles changeset discovery only; will change to handle all
1284 1286 discovery at some point."""
1285 1287 tmp = discovery.findcommonincoming(pullop.repo,
1286 1288 pullop.remote,
1287 1289 heads=pullop.heads,
1288 1290 force=pullop.force)
1289 1291 common, fetch, rheads = tmp
1290 1292 nm = pullop.repo.unfiltered().changelog.nodemap
1291 1293 if fetch and rheads:
1294 1296 # If a remote head is filtered locally, let's drop it from the unknown
1295 1297 # remote heads and put it back in common.
1296 1298 #
1297 1299 # This is a hackish solution to catch most "common but locally
1298 1300 # hidden" situations. We do not perform discovery on the unfiltered
1299 1301 # repository because it ends up doing a pathological amount of round
1300 1302 # trips for a huge amount of changesets we do not care about.
1301 1303 #
1302 1304 # If a set of such "common but filtered" changesets exists on the server
1303 1305 # but does not include a remote head, we'll not be able to detect it.
1302 1304 scommon = set(common)
1303 1305 filteredrheads = []
1304 1306 for n in rheads:
1305 1307 if n in nm:
1306 1308 if n not in scommon:
1307 1309 common.append(n)
1308 1310 else:
1309 1311 filteredrheads.append(n)
1310 1312 if not filteredrheads:
1311 1313 fetch = []
1312 1314 rheads = filteredrheads
1313 1315 pullop.common = common
1314 1316 pullop.fetch = fetch
1315 1317 pullop.rheads = rheads
1316 1318
1317 1319 def _pullbundle2(pullop):
1318 1320 """pull data using bundle2
1319 1321
1320 1322 For now, the only supported data is the changegroup."""
1321 1323 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1322 1324
1323 1325 streaming, streamreqs = streamclone.canperformstreamclone(pullop)
1324 1326
1325 1327 # pulling changegroup
1326 1328 pullop.stepsdone.add('changegroup')
1327 1329
1328 1330 kwargs['common'] = pullop.common
1329 1331 kwargs['heads'] = pullop.heads or pullop.rheads
1330 1332 kwargs['cg'] = pullop.fetch
1331 1333 if 'listkeys' in pullop.remotebundle2caps:
1332 1334 kwargs['listkeys'] = ['phases']
1333 1335 if pullop.remotebookmarks is None:
1334 1336 # make sure to always include bookmark data when migrating
1335 1337 # `hg incoming --bundle` to using this function.
1336 1338 kwargs['listkeys'].append('bookmarks')
1337 1339
1338 1340 # If this is a full pull / clone and the server supports the clone bundles
1339 1341 # feature, tell the server whether we attempted a clone bundle. The
1340 1342 # presence of this flag indicates the client supports clone bundles. This
1341 1343 # will enable the server to treat clients that support clone bundles
1342 1344 # differently from those that don't.
1343 1345 if (pullop.remote.capable('clonebundles')
1344 1346 and pullop.heads is None and list(pullop.common) == [nullid]):
1345 1347 kwargs['cbattempted'] = pullop.clonebundleattempted
1346 1348
1347 1349 if streaming:
1348 1350 pullop.repo.ui.status(_('streaming all changes\n'))
1349 1351 elif not pullop.fetch:
1350 1352 pullop.repo.ui.status(_("no changes found\n"))
1351 1353 pullop.cgresult = 0
1352 1354 else:
1353 1355 if pullop.heads is None and list(pullop.common) == [nullid]:
1354 1356 pullop.repo.ui.status(_("requesting all changes\n"))
1355 1357 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1356 1358 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1357 1359 if obsolete.commonversion(remoteversions) is not None:
1358 1360 kwargs['obsmarkers'] = True
1359 1361 pullop.stepsdone.add('obsmarkers')
1360 1362 _pullbundle2extraprepare(pullop, kwargs)
1361 1363 bundle = pullop.remote.getbundle('pull', **kwargs)
1362 1364 try:
1363 1365 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1364 1366 except error.BundleValueError as exc:
1365 1367 raise error.Abort(_('missing support for %s') % exc)
1366 1368
1367 1369 if pullop.fetch:
1368 1370 results = [cg['return'] for cg in op.records['changegroup']]
1369 1371 pullop.cgresult = changegroup.combineresults(results)
1370 1372
1371 1373 # processing phases change
1372 1374 for namespace, value in op.records['listkeys']:
1373 1375 if namespace == 'phases':
1374 1376 _pullapplyphases(pullop, value)
1375 1377
1376 1378 # processing bookmark update
1377 1379 for namespace, value in op.records['listkeys']:
1378 1380 if namespace == 'bookmarks':
1379 1381 pullop.remotebookmarks = value
1380 1382
1381 1383 # bookmark data were either already there or pulled in the bundle
1382 1384 if pullop.remotebookmarks is not None:
1383 1385 _pullbookmarks(pullop)
1384 1386
1385 1387 def _pullbundle2extraprepare(pullop, kwargs):
1386 1388 """hook function so that extensions can extend the getbundle call"""
1387 1389 pass
1388 1390
1389 1391 def _pullchangeset(pullop):
1390 1392 """pull changeset from unbundle into the local repo"""
1391 1393 # We delay opening the transaction as late as possible so we
1392 1394 # don't open a transaction for nothing and don't break a future useful
1393 1395 # rollback call
1394 1396 if 'changegroup' in pullop.stepsdone:
1395 1397 return
1396 1398 pullop.stepsdone.add('changegroup')
1397 1399 if not pullop.fetch:
1398 1400 pullop.repo.ui.status(_("no changes found\n"))
1399 1401 pullop.cgresult = 0
1400 1402 return
1401 1403 pullop.gettransaction()
1402 1404 if pullop.heads is None and list(pullop.common) == [nullid]:
1403 1405 pullop.repo.ui.status(_("requesting all changes\n"))
1404 1406 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1405 1407 # issue1320, avoid a race if remote changed after discovery
1406 1408 pullop.heads = pullop.rheads
1407 1409
1408 1410 if pullop.remote.capable('getbundle'):
1409 1411 # TODO: get bundlecaps from remote
1410 1412 cg = pullop.remote.getbundle('pull', common=pullop.common,
1411 1413 heads=pullop.heads or pullop.rheads)
1412 1414 elif pullop.heads is None:
1413 1415 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1414 1416 elif not pullop.remote.capable('changegroupsubset'):
1415 1417 raise error.Abort(_("partial pull cannot be done because "
1416 1418 "other repository doesn't support "
1417 1419 "changegroupsubset."))
1418 1420 else:
1419 1421 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1420 1422 pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1421 1423
1422 1424 def _pullphase(pullop):
1423 1425 # Get remote phases data from remote
1424 1426 if 'phases' in pullop.stepsdone:
1425 1427 return
1426 1428 remotephases = pullop.remote.listkeys('phases')
1427 1429 _pullapplyphases(pullop, remotephases)
1428 1430
1429 1431 def _pullapplyphases(pullop, remotephases):
1430 1432 """apply phase movement from observed remote state"""
1431 1433 if 'phases' in pullop.stepsdone:
1432 1434 return
1433 1435 pullop.stepsdone.add('phases')
1434 1436 publishing = bool(remotephases.get('publishing', False))
1435 1437 if remotephases and not publishing:
1436 1438 # remote is new and non-publishing
1437 1439 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1438 1440 pullop.pulledsubset,
1439 1441 remotephases)
1440 1442 dheads = pullop.pulledsubset
1441 1443 else:
1442 1444 # Remote is old or publishing; all common changesets
1443 1445 # should be seen as public
1444 1446 pheads = pullop.pulledsubset
1445 1447 dheads = []
1446 1448 unfi = pullop.repo.unfiltered()
1447 1449 phase = unfi._phasecache.phase
1448 1450 rev = unfi.changelog.nodemap.get
1449 1451 public = phases.public
1450 1452 draft = phases.draft
1451 1453
1452 1454 # exclude changesets already public locally and update the others
1453 1455 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1454 1456 if pheads:
1455 1457 tr = pullop.gettransaction()
1456 1458 phases.advanceboundary(pullop.repo, tr, public, pheads)
1457 1459
1458 1460 # exclude changesets already draft locally and update the others
1459 1461 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1460 1462 if dheads:
1461 1463 tr = pullop.gettransaction()
1462 1464 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1463 1465
1464 1466 def _pullbookmarks(pullop):
1465 1467 """process the remote bookmark information to update the local one"""
1466 1468 if 'bookmarks' in pullop.stepsdone:
1467 1469 return
1468 1470 pullop.stepsdone.add('bookmarks')
1469 1471 repo = pullop.repo
1470 1472 remotebookmarks = pullop.remotebookmarks
1471 1473 remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
1472 1474 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1473 1475 pullop.remote.url(),
1474 1476 pullop.gettransaction,
1475 1477 explicit=pullop.explicitbookmarks)
1476 1478
1477 1479 def _pullobsolete(pullop):
1478 1480 """utility function to pull obsolete markers from a remote
1479 1481
1480 1482 The `gettransaction` argument is a function that returns the pull
1481 1483 transaction, creating one if necessary. We return the transaction to inform
1482 1484 the calling code that a new transaction has been created (when applicable).
1483 1485
1484 1486 Exists mostly to allow overriding for experimentation purposes"""
1485 1487 if 'obsmarkers' in pullop.stepsdone:
1486 1488 return
1487 1489 pullop.stepsdone.add('obsmarkers')
1488 1490 tr = None
1489 1491 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1490 1492 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1491 1493 remoteobs = pullop.remote.listkeys('obsolete')
1492 1494 if 'dump0' in remoteobs:
1493 1495 tr = pullop.gettransaction()
1494 1496 markers = []
1495 1497 for key in sorted(remoteobs, reverse=True):
1496 1498 if key.startswith('dump'):
1497 1499 data = base85.b85decode(remoteobs[key])
1498 1500 version, newmarks = obsolete._readmarkers(data)
1499 1501 markers += newmarks
1500 1502 if markers:
1501 1503 pullop.repo.obsstore.add(tr, markers)
1502 1504 pullop.repo.invalidatevolatilesets()
1503 1505 return tr
1504 1506
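# For reference, a sketch of the pushkey payload consumed above (values
# illustrative): the remote exposes base85-encoded marker chunks under
# numbered 'dump' keys, e.g.
#
#     remoteobs = {'dump0': '<base85 marker data>',
#                  'dump1': '<base85 marker data>'}
#
# Each chunk is decoded with base85.b85decode(), parsed by
# obsolete._readmarkers() and added to the obsstore inside the pull
# transaction.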
1505 1507 def caps20to10(repo):
1506 1508 """return a set with appropriate options to use bundle20 during getbundle"""
1507 1509 caps = set(['HG20'])
1508 1510 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1509 1511 caps.add('bundle2=' + urlreq.quote(capsblob))
1510 1512 return caps
1511 1513
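# Illustrative usage sketch (hypothetical call site, assuming the peer's
# getbundle accepts a bundlecaps argument the way the pull code passes one):
#
#     caps = caps20to10(repo)
#     # caps == {'HG20', 'bundle2=<urlquoted capabilities blob>'}
#     cg = remote.getbundle('pull', common=common, heads=heads,
#                           bundlecaps=caps)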
1512 1514 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1513 1515 getbundle2partsorder = []
1514 1516
1515 1517 # Mapping between step name and function
1516 1518 #
1517 1519 # This exists to help extensions wrap steps if necessary
1518 1520 getbundle2partsmapping = {}
1519 1521
1520 1522 def getbundle2partsgenerator(stepname, idx=None):
1521 1523 """decorator for function generating bundle2 part for getbundle
1522 1524
1523 1525 The function is added to the step -> function mapping and appended to the
1524 1526 list of steps. Beware that decorated functions will be added in order
1525 1527 (this may matter).
1526 1528
1527 1529 You can only use this decorator for new steps; if you want to wrap a step
1528 1530 from an extension, modify the getbundle2partsmapping dictionary directly."""
1529 1531 def dec(func):
1530 1532 assert stepname not in getbundle2partsmapping
1531 1533 getbundle2partsmapping[stepname] = func
1532 1534 if idx is None:
1533 1535 getbundle2partsorder.append(stepname)
1534 1536 else:
1535 1537 getbundle2partsorder.insert(idx, stepname)
1536 1538 return func
1537 1539 return dec
1538 1540
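# Illustrative sketch of registering a new step with the decorator above;
# the step and part names below are hypothetical:
#
#     @getbundle2partsgenerator('myext-example')
#     def _getbundleexamplepart(bundler, repo, source, bundlecaps=None,
#                               b2caps=None, **kwargs):
#         if 'myext:example' in b2caps:  # only reply if the client asked
#             bundler.newpart('myext:example', data='payload',
#                             mandatory=False)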
1539 1541 def bundle2requested(bundlecaps):
1540 1542 if bundlecaps is not None:
1541 1543 return any(cap.startswith('HG2') for cap in bundlecaps)
1542 1544 return False
1543 1545
1544 1546 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1545 1547 **kwargs):
1546 1548 """Return chunks constituting a bundle's raw data.
1547 1549
1548 1550 Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
1549 1551 passed.
1550 1552
1551 1553 Returns an iterator over raw chunks (of varying sizes).
1552 1554 """
1553 1555 usebundle2 = bundle2requested(bundlecaps)
1554 1556 # bundle10 case
1555 1557 if not usebundle2:
1556 1558 if bundlecaps and not kwargs.get('cg', True):
1557 1559 raise ValueError(_('request for bundle10 must include changegroup'))
1558 1560
1559 1561 if kwargs:
1560 1562 raise ValueError(_('unsupported getbundle arguments: %s')
1561 1563 % ', '.join(sorted(kwargs.keys())))
1562 1564 outgoing = _computeoutgoing(repo, heads, common)
1563 1565 bundler = changegroup.getbundler('01', repo, bundlecaps)
1564 1566 return changegroup.getsubsetraw(repo, outgoing, bundler, source)
1565 1567
1566 1568 # bundle20 case
1567 1569 b2caps = {}
1568 1570 for bcaps in bundlecaps:
1569 1571 if bcaps.startswith('bundle2='):
1570 1572 blob = urlreq.unquote(bcaps[len('bundle2='):])
1571 1573 b2caps.update(bundle2.decodecaps(blob))
1572 1574 bundler = bundle2.bundle20(repo.ui, b2caps)
1573 1575
1574 1576 kwargs['heads'] = heads
1575 1577 kwargs['common'] = common
1576 1578
1577 1579 for name in getbundle2partsorder:
1578 1580 func = getbundle2partsmapping[name]
1579 1581 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1580 1582 **kwargs)
1581 1583
1582 1584 return bundler.getchunks()
1583 1585
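# Illustrative usage sketch (hypothetical caller): request a bundle2 stream
# and write it out chunk by chunk.
#
#     chunks = getbundlechunks(repo, 'serve', heads=repo.heads(),
#                              common=[nullid], bundlecaps=set(['HG20']),
#                              cg=True, listkeys=['bookmarks'])
#     with open('out.hg', 'wb') as fh:
#         for chunk in chunks:
#             fh.write(chunk)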
1584 1586 @getbundle2partsgenerator('changegroup')
1585 1587 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1586 1588 b2caps=None, heads=None, common=None, **kwargs):
1587 1589 """add a changegroup part to the requested bundle"""
1588 1590 cg = None
1589 1591 if kwargs.get('cg', True):
1590 1592 # build changegroup bundle here.
1591 1593 version = '01'
1592 1594 cgversions = b2caps.get('changegroup')
1593 1595 if cgversions: # 3.1 and 3.2 ship with an empty value
1594 1596 cgversions = [v for v in cgversions
1595 1597 if v in changegroup.supportedoutgoingversions(repo)]
1596 1598 if not cgversions:
1597 1599 raise ValueError(_('no common changegroup version'))
1598 1600 version = max(cgversions)
1599 1601 outgoing = _computeoutgoing(repo, heads, common)
1600 1602 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1601 1603 bundlecaps=bundlecaps,
1602 1604 version=version)
1603 1605
1604 1606 if cg:
1605 1607 part = bundler.newpart('changegroup', data=cg)
1606 1608 if cgversions:
1607 1609 part.addparam('version', version)
1608 1610 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1609 1611 if 'treemanifest' in repo.requirements:
1610 1612 part.addparam('treemanifest', '1')
1611 1613
1612 1614 @getbundle2partsgenerator('listkeys')
1613 1615 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1614 1616 b2caps=None, **kwargs):
1615 1617 """add parts containing listkeys namespaces to the requested bundle"""
1616 1618 listkeys = kwargs.get('listkeys', ())
1617 1619 for namespace in listkeys:
1618 1620 part = bundler.newpart('listkeys')
1619 1621 part.addparam('namespace', namespace)
1620 1622 keys = repo.listkeys(namespace).items()
1621 1623 part.data = pushkey.encodekeys(keys)
1622 1624
1623 1625 @getbundle2partsgenerator('obsmarkers')
1624 1626 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1625 1627 b2caps=None, heads=None, **kwargs):
1626 1628 """add an obsolescence markers part to the requested bundle"""
1627 1629 if kwargs.get('obsmarkers', False):
1628 1630 if heads is None:
1629 1631 heads = repo.heads()
1630 1632 subset = [c.node() for c in repo.set('::%ln', heads)]
1631 1633 markers = repo.obsstore.relevantmarkers(subset)
1632 1634 markers = sorted(markers)
1633 1635 buildobsmarkerspart(bundler, markers)
1634 1636
1635 1637 @getbundle2partsgenerator('hgtagsfnodes')
1636 1638 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1637 1639 b2caps=None, heads=None, common=None,
1638 1640 **kwargs):
1639 1641 """Transfer the .hgtags filenodes mapping.
1640 1642
1641 1643 Only values for heads in this bundle will be transferred.
1642 1644
1643 1645 The part data consists of pairs of 20 byte changeset node and .hgtags
1644 1646 filenodes raw values.
1645 1647 """
1646 1648 # Don't send unless:
1647 1649 # - changesets are being exchanged,
1648 1650 # - the client supports it.
1649 1651 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1650 1652 return
1651 1653
1652 1654 outgoing = _computeoutgoing(repo, heads, common)
1653 1655
1654 1656 if not outgoing.missingheads:
1655 1657 return
1656 1658
1657 1659 cache = tags.hgtagsfnodescache(repo.unfiltered())
1658 1660 chunks = []
1659 1661
1660 1662 # .hgtags fnodes are only relevant for head changesets. While we could
1661 1663 # transfer values for all known nodes, there will likely be little to
1662 1664 # no benefit.
1663 1665 #
1664 1666 # We don't bother using a generator to produce output data because
1665 1667 # a) we only have 40 bytes per head and even esoteric numbers of heads
1666 1668 # consume little memory (1M heads is 40MB); b) we don't want to send the
1667 1669 # part if we don't have entries and knowing if we have entries requires
1668 1670 # cache lookups.
1669 1671 for node in outgoing.missingheads:
1670 1672 # Don't compute missing, as this may slow down serving.
1671 1673 fnode = cache.getfnode(node, computemissing=False)
1672 1674 if fnode is not None:
1673 1675 chunks.extend([node, fnode])
1674 1676
1675 1677 if chunks:
1676 1678 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1677 1679
1678 1680 def _getbookmarks(repo, **kwargs):
1679 1681 """Returns bookmark to node mapping.
1680 1682
1681 1683 This function is primarily used to generate `bookmarks` bundle2 part.
1682 1684 It is a separate function in order to make it easy to wrap it
1683 1685 in extensions. Passing `kwargs` to the function makes it easy to
1684 1686 add new parameters in extensions.
1685 1687 """
1686 1688
1687 1689 return dict(bookmod.listbinbookmarks(repo))
1688 1690
1689 1691 def check_heads(repo, their_heads, context):
1690 1692 """check if the heads of a repo have been modified
1691 1693
1692 1694 Used by peer for unbundling.
1693 1695 """
1694 1696 heads = repo.heads()
1695 1697 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1696 1698 if not (their_heads == ['force'] or their_heads == heads or
1697 1699 their_heads == ['hashed', heads_hash]):
1698 1700 # someone else committed/pushed/unbundled while we
1699 1701 # were transferring data
1700 1702 raise error.PushRaced('repository changed while %s - '
1701 1703 'please try again' % context)
1702 1704
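# Illustrative sketch of the three accepted forms of ``their_heads``: the
# ['force'] escape hatch, a literal list of head nodes, or the compact
# hashed form computed the same way as above:
#
#     expected = ['hashed',
#                 hashlib.sha1(''.join(sorted(repo.heads()))).digest()]
#     check_heads(repo, expected, 'uploading changes')  # no-op if unchanged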
1703 1705 def unbundle(repo, cg, heads, source, url):
1704 1706 """Apply a bundle to a repo.
1705 1707
1706 1708 This function makes sure the repo is locked during the application and has
1707 1709 a mechanism to check that no push race occurred between the creation of the
1708 1710 bundle and its application.
1709 1711
1710 1712 If the push was raced, a PushRaced exception is raised."""
1711 1713 r = 0
1712 1714 # need a transaction when processing a bundle2 stream
1713 1715 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1714 1716 lockandtr = [None, None, None]
1715 1717 recordout = None
1716 1718 # quick fix for output mismatch with bundle2 in 3.4
1717 1719 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1718 1720 False)
1719 1721 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1720 1722 captureoutput = True
1721 1723 try:
1722 1724 check_heads(repo, heads, 'uploading changes')
1723 1725 # push can proceed
1724 1726 if util.safehasattr(cg, 'params'):
1725 1727 r = None
1726 1728 try:
1727 1729 def gettransaction():
1728 1730 if not lockandtr[2]:
1729 1731 lockandtr[0] = repo.wlock()
1730 1732 lockandtr[1] = repo.lock()
1731 1733 lockandtr[2] = repo.transaction(source)
1732 1734 lockandtr[2].hookargs['source'] = source
1733 1735 lockandtr[2].hookargs['url'] = url
1734 1736 lockandtr[2].hookargs['bundle2'] = '1'
1735 1737 return lockandtr[2]
1736 1738
1737 1739 # Do greedy locking by default until we're satisfied with lazy
1738 1740 # locking.
1739 1741 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1740 1742 gettransaction()
1741 1743
1742 1744 op = bundle2.bundleoperation(repo, gettransaction,
1743 1745 captureoutput=captureoutput)
1744 1746 try:
1745 1747 op = bundle2.processbundle(repo, cg, op=op)
1746 1748 finally:
1747 1749 r = op.reply
1748 1750 if captureoutput and r is not None:
1749 1751 repo.ui.pushbuffer(error=True, subproc=True)
1750 1752 def recordout(output):
1751 1753 r.newpart('output', data=output, mandatory=False)
1752 1754 if lockandtr[2] is not None:
1753 1755 lockandtr[2].close()
1754 1756 except BaseException as exc:
1755 1757 exc.duringunbundle2 = True
1756 1758 if captureoutput and r is not None:
1757 1759 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1758 1760 def recordout(output):
1759 1761 part = bundle2.bundlepart('output', data=output,
1760 1762 mandatory=False)
1761 1763 parts.append(part)
1762 1764 raise
1763 1765 else:
1764 1766 lockandtr[1] = repo.lock()
1765 1767 r = cg.apply(repo, source, url)
1766 1768 finally:
1767 1769 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1768 1770 if recordout is not None:
1769 1771 recordout(repo.ui.popbuffer())
1770 1772 return r
1771 1773
1772 1774 def _maybeapplyclonebundle(pullop):
1773 1775 """Apply a clone bundle from a remote, if possible."""
1774 1776
1775 1777 repo = pullop.repo
1776 1778 remote = pullop.remote
1777 1779
1778 1780 if not repo.ui.configbool('ui', 'clonebundles', True):
1779 1781 return
1780 1782
1781 1783 # Only run if local repo is empty.
1782 1784 if len(repo):
1783 1785 return
1784 1786
1785 1787 if pullop.heads:
1786 1788 return
1787 1789
1788 1790 if not remote.capable('clonebundles'):
1789 1791 return
1790 1792
1791 1793 res = remote._call('clonebundles')
1792 1794
1793 1795 # If we call the wire protocol command, that's good enough to record the
1794 1796 # attempt.
1795 1797 pullop.clonebundleattempted = True
1796 1798
1797 1799 entries = parseclonebundlesmanifest(repo, res)
1798 1800 if not entries:
1799 1801 repo.ui.note(_('no clone bundles available on remote; '
1800 1802 'falling back to regular clone\n'))
1801 1803 return
1802 1804
1803 1805 entries = filterclonebundleentries(repo, entries)
1804 1806 if not entries:
1805 1807 # There is a thundering herd concern here. However, if a server
1806 1808 # operator doesn't advertise bundles appropriate for its clients,
1807 1809 # they deserve what's coming. Furthermore, from a client's
1808 1810 # perspective, no automatic fallback would mean not being able to
1809 1811 # clone!
1810 1812 repo.ui.warn(_('no compatible clone bundles available on server; '
1811 1813 'falling back to regular clone\n'))
1812 1814 repo.ui.warn(_('(you may want to report this to the server '
1813 1815 'operator)\n'))
1814 1816 return
1815 1817
1816 1818 entries = sortclonebundleentries(repo.ui, entries)
1817 1819
1818 1820 url = entries[0]['URL']
1819 1821 repo.ui.status(_('applying clone bundle from %s\n') % url)
1820 1822 if trypullbundlefromurl(repo.ui, repo, url):
1821 1823 repo.ui.status(_('finished applying clone bundle\n'))
1822 1824 # Bundle failed.
1823 1825 #
1824 1826 # We abort by default to avoid the thundering herd of
1825 1827 # clients flooding a server that was expecting expensive
1826 1828 # clone load to be offloaded.
1827 1829 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1828 1830 repo.ui.warn(_('falling back to normal clone\n'))
1829 1831 else:
1830 1832 raise error.Abort(_('error applying bundle'),
1831 1833 hint=_('if this error persists, consider contacting '
1832 1834 'the server operator or disable clone '
1833 1835 'bundles via '
1834 1836 '"--config ui.clonebundles=false"'))
1835 1837
1836 1838 def parseclonebundlesmanifest(repo, s):
1837 1839 """Parses the raw text of a clone bundles manifest.
1838 1840
1839 1841 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1840 1842 to the URL and other keys are the attributes for the entry.
1841 1843 """
1842 1844 m = []
1843 1845 for line in s.splitlines():
1844 1846 fields = line.split()
1845 1847 if not fields:
1846 1848 continue
1847 1849 attrs = {'URL': fields[0]}
1848 1850 for rawattr in fields[1:]:
1849 1851 key, value = rawattr.split('=', 1)
1850 1852 key = urlreq.unquote(key)
1851 1853 value = urlreq.unquote(value)
1852 1854 attrs[key] = value
1853 1855
1854 1856 # Parse BUNDLESPEC into components. This makes client-side
1855 1857 # preferences easier to specify since you can prefer a single
1856 1858 # component of the BUNDLESPEC.
1857 1859 if key == 'BUNDLESPEC':
1858 1860 try:
1859 1861 comp, version, params = parsebundlespec(repo, value,
1860 1862 externalnames=True)
1861 1863 attrs['COMPRESSION'] = comp
1862 1864 attrs['VERSION'] = version
1863 1865 except error.InvalidBundleSpecification:
1864 1866 pass
1865 1867 except error.UnsupportedBundleSpecification:
1866 1868 pass
1867 1869
1868 1870 m.append(attrs)
1869 1871
1870 1872 return m
1871 1873
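# Illustrative example of the manifest format parsed above (URL and values
# hypothetical): one entry per line, the URL first, followed by URI-encoded
# key=value attributes.
#
#     manifest = ('https://example.com/full.hg '
#                 'BUNDLESPEC=gzip-v2 REQUIRESNI=true\n')
#     entries = parseclonebundlesmanifest(repo, manifest)
#     # entries == [{'URL': 'https://example.com/full.hg',
#     #              'BUNDLESPEC': 'gzip-v2', 'COMPRESSION': 'gzip',
#     #              'VERSION': 'v2', 'REQUIRESNI': 'true'}]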
1872 1874 def filterclonebundleentries(repo, entries):
1873 1875 """Remove incompatible clone bundle manifest entries.
1874 1876
1875 1877 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1876 1878 and returns a new list consisting of only the entries that this client
1877 1879 should be able to apply.
1878 1880
1879 1881 There is no guarantee we'll be able to apply all returned entries because
1880 1882 the metadata we use to filter on may be missing or wrong.
1881 1883 """
1882 1884 newentries = []
1883 1885 for entry in entries:
1884 1886 spec = entry.get('BUNDLESPEC')
1885 1887 if spec:
1886 1888 try:
1887 1889 parsebundlespec(repo, spec, strict=True)
1888 1890 except error.InvalidBundleSpecification as e:
1889 1891 repo.ui.debug(str(e) + '\n')
1890 1892 continue
1891 1893 except error.UnsupportedBundleSpecification as e:
1892 1894 repo.ui.debug('filtering %s because unsupported bundle '
1893 1895 'spec: %s\n' % (entry['URL'], str(e)))
1894 1896 continue
1895 1897
1896 1898 if 'REQUIRESNI' in entry and not sslutil.hassni:
1897 1899 repo.ui.debug('filtering %s because SNI not supported\n' %
1898 1900 entry['URL'])
1899 1901 continue
1900 1902
1901 1903 newentries.append(entry)
1902 1904
1903 1905 return newentries
1904 1906
1905 1907 class clonebundleentry(object):
1906 1908 """Represents an item in a clone bundles manifest.
1907 1909
1908 1910 This rich class is needed to support sorting since sorted() in Python 3
1909 1911 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
1910 1912 won't work.
1911 1913 """
1912 1914
1913 1915 def __init__(self, value, prefers):
1914 1916 self.value = value
1915 1917 self.prefers = prefers
1916 1918
1917 1919 def _cmp(self, other):
1918 1920 for prefkey, prefvalue in self.prefers:
1919 1921 avalue = self.value.get(prefkey)
1920 1922 bvalue = other.value.get(prefkey)
1921 1923
1922 1924 # Special case: b is missing the attribute and a matches exactly.
1923 1925 if avalue is not None and bvalue is None and avalue == prefvalue:
1924 1926 return -1
1925 1927
1926 1928 # Special case: a is missing the attribute and b matches exactly.
1927 1929 if bvalue is not None and avalue is None and bvalue == prefvalue:
1928 1930 return 1
1929 1931
1930 1932 # We can't compare unless the attribute is present on both.
1931 1933 if avalue is None or bvalue is None:
1932 1934 continue
1933 1935
1934 1936 # Same values should fall back to next attribute.
1935 1937 if avalue == bvalue:
1936 1938 continue
1937 1939
1938 1940 # Exact matches come first.
1939 1941 if avalue == prefvalue:
1940 1942 return -1
1941 1943 if bvalue == prefvalue:
1942 1944 return 1
1943 1945
1944 1946 # Fall back to next attribute.
1945 1947 continue
1946 1948
1947 1949 # If we got here we couldn't sort by attributes and prefers. Fall
1948 1950 # back to index order.
1949 1951 return 0
1950 1952
1951 1953 def __lt__(self, other):
1952 1954 return self._cmp(other) < 0
1953 1955
1954 1956 def __gt__(self, other):
1955 1957 return self._cmp(other) > 0
1956 1958
1957 1959 def __eq__(self, other):
1958 1960 return self._cmp(other) == 0
1959 1961
1960 1962 def __le__(self, other):
1961 1963 return self._cmp(other) <= 0
1962 1964
1963 1965 def __ge__(self, other):
1964 1966 return self._cmp(other) >= 0
1965 1967
1966 1968 def __ne__(self, other):
1967 1969 return self._cmp(other) != 0
1968 1970
1969 1971 def sortclonebundleentries(ui, entries):
1970 1972 prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
1971 1973 if not prefers:
1972 1974 return list(entries)
1973 1975
1974 1976 prefers = [p.split('=', 1) for p in prefers]
1975 1977
1976 1978 items = sorted(clonebundleentry(v, prefers) for v in entries)
1977 1979 return [i.value for i in items]
1978 1980
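# Illustrative example of preference-based sorting (config value
# hypothetical): with
#
#     [ui]
#     clonebundleprefers = VERSION=v2, COMPRESSION=gzip
#
# ui.configlist() yields ['VERSION=v2', 'COMPRESSION=gzip']; entries whose
# VERSION is exactly 'v2' sort first, ties are then broken by whether
# COMPRESSION is 'gzip', and remaining ties keep manifest order.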
1979 1981 def trypullbundlefromurl(ui, repo, url):
1980 1982 """Attempt to apply a bundle from a URL."""
1981 1983 lock = repo.lock()
1982 1984 try:
1983 1985 tr = repo.transaction('bundleurl')
1984 1986 try:
1985 1987 try:
1986 1988 fh = urlmod.open(ui, url)
1987 1989 cg = readbundle(ui, fh, 'stream')
1988 1990
1989 1991 if isinstance(cg, bundle2.unbundle20):
1990 1992 bundle2.processbundle(repo, cg, lambda: tr)
1991 1993 elif isinstance(cg, streamclone.streamcloneapplier):
1992 1994 cg.apply(repo)
1993 1995 else:
1994 1996 cg.apply(repo, 'clonebundles', url)
1995 1997 tr.close()
1996 1998 return True
1997 1999 except urlerr.httperror as e:
1998 2000 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
1999 2001 except urlerr.urlerror as e:
2000 2002 ui.warn(_('error fetching bundle: %s\n') % e.reason[1])
2001 2003
2002 2004 return False
2003 2005 finally:
2004 2006 tr.release()
2005 2007 finally:
2006 2008 lock.release()
@@ -1,1116 +1,1116
1 1 Test exchange of common information using bundle2
2 2
3 3
4 4 $ getmainid() {
5 5 > hg -R main log --template '{node}\n' --rev "$1"
6 6 > }
7 7
8 8 enable obsolescence
9 9
10 10 $ cp $HGRCPATH $TESTTMP/hgrc.orig
11 11 $ cat > $TESTTMP/bundle2-pushkey-hook.sh << EOF
12 12 > echo pushkey: lock state after \"\$HG_NAMESPACE\"
13 13 > hg debuglock
14 14 > EOF
15 15
16 16 $ cat >> $HGRCPATH << EOF
17 17 > [experimental]
18 18 > evolution=createmarkers,exchange
19 19 > bundle2-output-capture=True
20 20 > [ui]
21 21 > ssh=python "$TESTDIR/dummyssh"
22 22 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
23 23 > [web]
24 24 > push_ssl = false
25 25 > allow_push = *
26 26 > [phases]
27 27 > publish=False
28 28 > [hooks]
29 29 > pretxnclose.tip = hg log -r tip -T "pre-close-tip:{node|short} {phase} {bookmarks}\n"
30 30 > txnclose.tip = hg log -r tip -T "postclose-tip:{node|short} {phase} {bookmarks}\n"
31 31 > txnclose.env = sh -c "HG_LOCAL= printenv.py txnclose"
32 32 > pushkey= sh "$TESTTMP/bundle2-pushkey-hook.sh"
33 33 > EOF
34 34
35 35 The extension requires a repo (currently unused)
36 36
37 37 $ hg init main
38 38 $ cd main
39 39 $ touch a
40 40 $ hg add a
41 41 $ hg commit -m 'a'
42 42 pre-close-tip:3903775176ed draft
43 43 postclose-tip:3903775176ed draft
44 44 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
45 45
46 46 $ hg unbundle $TESTDIR/bundles/rebase.hg
47 47 adding changesets
48 48 adding manifests
49 49 adding file changes
50 50 added 8 changesets with 7 changes to 7 files (+3 heads)
51 51 pre-close-tip:02de42196ebe draft
52 52 postclose-tip:02de42196ebe draft
53 53 txnclose hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=unbundle HG_TXNID=TXN:* HG_TXNNAME=unbundle (glob)
54 54 bundle:*/tests/bundles/rebase.hg HG_URL=bundle:*/tests/bundles/rebase.hg (glob)
55 55 (run 'hg heads' to see heads, 'hg merge' to merge)
56 56
57 57 $ cd ..
58 58
59 59 Real world exchange
60 60 =====================
61 61
62 62 Add more obsolescence information
63 63
64 64 $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc`
65 65 pre-close-tip:02de42196ebe draft
66 66 postclose-tip:02de42196ebe draft
67 67 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
68 68 $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c`
69 69 pre-close-tip:02de42196ebe draft
70 70 postclose-tip:02de42196ebe draft
71 71 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
72 72
73 73 clone --pull
74 74
75 75 $ hg -R main phase --public cd010b8cd998
76 76 pre-close-tip:02de42196ebe draft
77 77 postclose-tip:02de42196ebe draft
78 78 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
79 79 $ hg clone main other --pull --rev 9520eea781bc
80 80 adding changesets
81 81 adding manifests
82 82 adding file changes
83 83 added 2 changesets with 2 changes to 2 files
84 84 1 new obsolescence markers
85 85 pre-close-tip:9520eea781bc draft
86 86 postclose-tip:9520eea781bc draft
87 87 txnclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=9520eea781bcca16c1e15acc0ba14335a0e8e5ba HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
88 88 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
89 89 updating to branch default
90 90 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
91 91 $ hg -R other log -G
92 92 @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
93 93 |
94 94 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
95 95
96 96 $ hg -R other debugobsolete
97 97 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
98 98
99 99 pull
100 100
101 101 $ hg -R main phase --public 9520eea781bc
102 102 pre-close-tip:02de42196ebe draft
103 103 postclose-tip:02de42196ebe draft
104 104 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
105 105 $ hg -R other pull -r 24b6387c8c8c
106 106 pulling from $TESTTMP/main (glob)
107 107 searching for changes
108 108 adding changesets
109 109 adding manifests
110 110 adding file changes
111 111 added 1 changesets with 1 changes to 1 files (+1 heads)
112 112 1 new obsolescence markers
113 113 pre-close-tip:24b6387c8c8c draft
114 114 postclose-tip:24b6387c8c8c draft
115 115 txnclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_NODE_LAST=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
116 116 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
117 117 (run 'hg heads' to see heads, 'hg merge' to merge)
118 118 $ hg -R other log -G
119 119 o 2:24b6387c8c8c draft Nicolas Dumazet <nicdumz.commits@gmail.com> F
120 120 |
121 121 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
122 122 |/
123 123 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
124 124
125 125 $ hg -R other debugobsolete
126 126 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
127 127 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
128 128
129 129 pull empty (with phase movement)
130 130
131 131 $ hg -R main phase --public 24b6387c8c8c
132 132 pre-close-tip:02de42196ebe draft
133 133 postclose-tip:02de42196ebe draft
134 134 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
135 135 $ hg -R other pull -r 24b6387c8c8c
136 136 pulling from $TESTTMP/main (glob)
137 137 no changes found
138 138 pre-close-tip:24b6387c8c8c public
139 139 postclose-tip:24b6387c8c8c public
140 140 txnclose hook: HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
141 141 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
142 142 $ hg -R other log -G
143 143 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
144 144 |
145 145 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
146 146 |/
147 147 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
148 148
149 149 $ hg -R other debugobsolete
150 150 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
151 151 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
152 152
153 153 pull empty
154 154
155 155 $ hg -R other pull -r 24b6387c8c8c
156 156 pulling from $TESTTMP/main (glob)
157 157 no changes found
158 158 pre-close-tip:24b6387c8c8c public
159 159 postclose-tip:24b6387c8c8c public
160 160 txnclose hook: HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
161 161 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
162 162 $ hg -R other log -G
163 163 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
164 164 |
165 165 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
166 166 |/
167 167 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
168 168
169 169 $ hg -R other debugobsolete
170 170 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
171 171 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
172 172
173 173 add extra data to test their exchange during push
174 174
175 175 $ hg -R main bookmark --rev eea13746799a book_eea1
176 176 pre-close-tip:02de42196ebe draft
177 177 postclose-tip:02de42196ebe draft
178 178 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
179 179 $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a`
180 180 pre-close-tip:02de42196ebe draft
181 181 postclose-tip:02de42196ebe draft
182 182 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
183 183 $ hg -R main bookmark --rev 02de42196ebe book_02de
184 184 pre-close-tip:02de42196ebe draft book_02de
185 185 postclose-tip:02de42196ebe draft book_02de
186 186 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
187 187 $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe`
188 188 pre-close-tip:02de42196ebe draft book_02de
189 189 postclose-tip:02de42196ebe draft book_02de
190 190 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
191 191 $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc
192 192 pre-close-tip:02de42196ebe draft book_02de
193 193 postclose-tip:02de42196ebe draft book_02de
194 194 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
195 195 $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16`
196 196 pre-close-tip:02de42196ebe draft book_02de
197 197 postclose-tip:02de42196ebe draft book_02de
198 198 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
199 199 $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd
200 200 pre-close-tip:02de42196ebe draft book_02de
201 201 postclose-tip:02de42196ebe draft book_02de
202 202 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
203 203 $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8`
204 204 pre-close-tip:02de42196ebe draft book_02de
205 205 postclose-tip:02de42196ebe draft book_02de
206 206 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
207 207 $ hg -R main bookmark --rev 32af7686d403 book_32af
208 208 pre-close-tip:02de42196ebe draft book_02de
209 209 postclose-tip:02de42196ebe draft book_02de
210 210 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
211 211 $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403`
212 212 pre-close-tip:02de42196ebe draft book_02de
213 213 postclose-tip:02de42196ebe draft book_02de
214 214 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
215 215
216 216 $ hg -R other bookmark --rev cd010b8cd998 book_eea1
217 217 pre-close-tip:24b6387c8c8c public
218 218 postclose-tip:24b6387c8c8c public
219 219 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
220 220 $ hg -R other bookmark --rev cd010b8cd998 book_02de
221 221 pre-close-tip:24b6387c8c8c public
222 222 postclose-tip:24b6387c8c8c public
223 223 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
224 224 $ hg -R other bookmark --rev cd010b8cd998 book_42cc
225 225 pre-close-tip:24b6387c8c8c public
226 226 postclose-tip:24b6387c8c8c public
227 227 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
228 228 $ hg -R other bookmark --rev cd010b8cd998 book_5fdd
229 229 pre-close-tip:24b6387c8c8c public
230 230 postclose-tip:24b6387c8c8c public
231 231 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
232 232 $ hg -R other bookmark --rev cd010b8cd998 book_32af
233 233 pre-close-tip:24b6387c8c8c public
234 234 postclose-tip:24b6387c8c8c public
235 235 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
236 236
237 237 $ hg -R main phase --public eea13746799a
238 238 pre-close-tip:02de42196ebe draft book_02de
239 239 postclose-tip:02de42196ebe draft book_02de
240 240 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
241 241
242 242 push
243 243 $ hg -R main push other --rev eea13746799a --bookmark book_eea1
244 244 pushing to other
245 245 searching for changes
246 246 remote: adding changesets
247 247 remote: adding manifests
248 248 remote: adding file changes
249 249 remote: added 1 changesets with 0 changes to 0 files (-1 heads)
250 250 remote: 1 new obsolescence markers
251 251 remote: pre-close-tip:eea13746799a public book_eea1
252 252 remote: pushkey: lock state after "phases"
253 253 remote: lock: free
254 254 remote: wlock: free
255 255 remote: pushkey: lock state after "bookmarks"
256 256 remote: lock: free
257 257 remote: wlock: free
258 258 remote: postclose-tip:eea13746799a public book_eea1
259 259 remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_NODE_LAST=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_TXNID=TXN:* HG_TXNNAME=push HG_URL=file:$TESTTMP/other (glob)
260 260 updating bookmark book_eea1
261 261 pre-close-tip:02de42196ebe draft book_02de
262 262 postclose-tip:02de42196ebe draft book_02de
263 263 txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
264 264 file:/*/$TESTTMP/other HG_URL=file:$TESTTMP/other (glob)
265 265 $ hg -R other log -G
266 266 o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
267 267 |\
268 268 | o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
269 269 | |
270 270 @ | 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
271 271 |/
272 272 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de book_32af book_42cc book_5fdd A
273 273
274 274 $ hg -R other debugobsolete
275 275 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
276 276 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
277 277 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
278 278
279 279 pull over ssh
280 280
281 281 $ hg -R other pull ssh://user@dummy/main -r 02de42196ebe --bookmark book_02de
282 282 pulling from ssh://user@dummy/main
283 283 searching for changes
284 284 adding changesets
285 285 adding manifests
286 286 adding file changes
287 287 added 1 changesets with 1 changes to 1 files (+1 heads)
288 288 1 new obsolescence markers
289 289 updating bookmark book_02de
290 290 pre-close-tip:02de42196ebe draft book_02de
291 291 postclose-tip:02de42196ebe draft book_02de
292 292 txnclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
293 293 ssh://user@dummy/main HG_URL=ssh://user@dummy/main
294 294 (run 'hg heads' to see heads, 'hg merge' to merge)
295 295 $ hg -R other debugobsolete
296 296 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
297 297 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
298 298 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
299 299 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
300 300
301 301 pull over http
302 302
303 303 $ hg serve -R main -p $HGPORT -d --pid-file=main.pid -E main-error.log
304 304 $ cat main.pid >> $DAEMON_PIDS
305 305
306 306 $ hg -R other pull http://localhost:$HGPORT/ -r 42ccdea3bb16 --bookmark book_42cc
307 307 pulling from http://localhost:$HGPORT/
308 308 searching for changes
309 309 adding changesets
310 310 adding manifests
311 311 adding file changes
312 312 added 1 changesets with 1 changes to 1 files (+1 heads)
313 313 1 new obsolescence markers
314 314 updating bookmark book_42cc
315 315 pre-close-tip:42ccdea3bb16 draft book_42cc
316 316 postclose-tip:42ccdea3bb16 draft book_42cc
317 317 txnclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_NODE_LAST=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
318 318 http://localhost:$HGPORT/ HG_URL=http://localhost:$HGPORT/
319 319 (run 'hg heads .' to see heads, 'hg merge' to merge)
320 320 $ cat main-error.log
321 321 $ hg -R other debugobsolete
322 322 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
323 323 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
324 324 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
325 325 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
326 326 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
327 327
328 328 push over ssh
329 329
330 330 $ hg -R main push ssh://user@dummy/other -r 5fddd98957c8 --bookmark book_5fdd
331 331 pushing to ssh://user@dummy/other
332 332 searching for changes
333 333 remote: adding changesets
334 334 remote: adding manifests
335 335 remote: adding file changes
336 336 remote: added 1 changesets with 1 changes to 1 files
337 337 remote: 1 new obsolescence markers
338 338 remote: pre-close-tip:5fddd98957c8 draft book_5fdd
339 339 remote: pushkey: lock state after "bookmarks"
340 340 remote: lock: free
341 341 remote: wlock: free
342 342 remote: postclose-tip:5fddd98957c8 draft book_5fdd
343 343 remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_NODE_LAST=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:ssh:127.0.0.1 (glob)
344 344 updating bookmark book_5fdd
345 345 pre-close-tip:02de42196ebe draft book_02de
346 346 postclose-tip:02de42196ebe draft book_02de
347 347 txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
348 348 ssh://user@dummy/other HG_URL=ssh://user@dummy/other
349 349 $ hg -R other log -G
350 350 o 6:5fddd98957c8 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
351 351 |
352 352 o 5:42ccdea3bb16 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
353 353 |
354 354 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
355 355 | |
356 356 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
357 357 | |/|
358 358 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
359 359 |/ /
360 360 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
361 361 |/
362 362 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af A
363 363
364 364 $ hg -R other debugobsolete
365 365 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
366 366 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
367 367 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
368 368 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
369 369 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
370 370 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
371 371
372 372 push over http
373 373
374 374 $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
375 375 $ cat other.pid >> $DAEMON_PIDS
376 376
377 377 $ hg -R main phase --public 32af7686d403
378 378 pre-close-tip:02de42196ebe draft book_02de
379 379 postclose-tip:02de42196ebe draft book_02de
380 380 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
381 381 $ hg -R main push http://localhost:$HGPORT2/ -r 32af7686d403 --bookmark book_32af
382 382 pushing to http://localhost:$HGPORT2/
383 383 searching for changes
384 384 remote: adding changesets
385 385 remote: adding manifests
386 386 remote: adding file changes
387 387 remote: added 1 changesets with 1 changes to 1 files
388 388 remote: 1 new obsolescence markers
389 389 remote: pre-close-tip:32af7686d403 public book_32af
390 390 remote: pushkey: lock state after "phases"
391 391 remote: lock: free
392 392 remote: wlock: free
393 393 remote: pushkey: lock state after "bookmarks"
394 394 remote: lock: free
395 395 remote: wlock: free
396 396 remote: postclose-tip:32af7686d403 public book_32af
397 397 remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=32af7686d403cf45b5d95f2d70cebea587ac806a HG_NODE_LAST=32af7686d403cf45b5d95f2d70cebea587ac806a HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:http:127.0.0.1: (glob)
398 398 updating bookmark book_32af
399 399 pre-close-tip:02de42196ebe draft book_02de
400 400 postclose-tip:02de42196ebe draft book_02de
401 401 txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
402 402 http://localhost:$HGPORT2/ HG_URL=http://localhost:$HGPORT2/
403 403 $ cat other-error.log
404 404
405 405 Check final content.
406 406
407 407 $ hg -R other log -G
408 408 o 7:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af D
409 409 |
410 410 o 6:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
411 411 |
412 412 o 5:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
413 413 |
414 414 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
415 415 | |
416 416 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
417 417 | |/|
418 418 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
419 419 |/ /
420 420 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
421 421 |/
422 422 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
423 423
424 424 $ hg -R other debugobsolete
425 425 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
426 426 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
427 427 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
428 428 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
429 429 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
430 430 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
431 431 7777777777777777777777777777777777777777 32af7686d403cf45b5d95f2d70cebea587ac806a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
432 432
433 433 (check that no 'pending' files remain)
434 434
435 435 $ ls -1 other/.hg/bookmarks*
436 436 other/.hg/bookmarks
437 437 $ ls -1 other/.hg/store/phaseroots*
438 438 other/.hg/store/phaseroots
439 439 $ ls -1 other/.hg/store/00changelog.i*
440 440 other/.hg/store/00changelog.i
441 441
442 442 Error Handling
443 443 ==============
444 444
445 445 Check that errors are properly returned to the client during push.
446 446
447 447 Setting up
448 448
449 449 $ cat > failpush.py << EOF
450 450 > """A small extension that makes push fails when using bundle2
451 451 >
452 452 > used to test error handling in bundle2
453 453 > """
454 454 >
455 455 > from mercurial import error
456 456 > from mercurial import bundle2
457 457 > from mercurial import exchange
458 458 > from mercurial import extensions
459 459 >
460 460 > def _pushbundle2failpart(pushop, bundler):
461 461 > reason = pushop.ui.config('failpush', 'reason', None)
462 462 > part = None
463 463 > if reason == 'abort':
464 464 > bundler.newpart('test:abort')
465 465 > if reason == 'unknown':
466 466 > bundler.newpart('test:unknown')
467 467 > if reason == 'race':
468 468 > # 20 Bytes of crap
469 469 > bundler.newpart('check:heads', data='01234567890123456789')
470 470 >
471 471 > @bundle2.parthandler("test:abort")
472 472 > def handleabort(op, part):
473 473 > raise error.Abort('Abandon ship!', hint="don't panic")
474 474 >
475 475 > def uisetup(ui):
476 476 > exchange.b2partsgenmapping['failpart'] = _pushbundle2failpart
477 477 > exchange.b2partsgenorder.insert(0, 'failpart')
478 478 >
479 479 > EOF
480 480
481 481 $ cd main
482 482 $ hg up tip
483 483 3 files updated, 0 files merged, 1 files removed, 0 files unresolved
484 484 $ echo 'I' > I
485 485 $ hg add I
486 486 $ hg ci -m 'I'
487 487 pre-close-tip:e7ec4e813ba6 draft
488 488 postclose-tip:e7ec4e813ba6 draft
489 489 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
490 490 $ hg id
491 491 e7ec4e813ba6 tip
492 492 $ cd ..
493 493
494 494 $ cat << EOF >> $HGRCPATH
495 495 > [extensions]
496 496 > failpush=$TESTTMP/failpush.py
497 497 > EOF
498 498
499 499 $ killdaemons.py
500 500 $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
501 501 $ cat other.pid >> $DAEMON_PIDS
502 502
503 503 Doing the actual push: Abort error
504 504
505 505 $ cat << EOF >> $HGRCPATH
506 506 > [failpush]
507 507 > reason = abort
508 508 > EOF
509 509
510 510 $ hg -R main push other -r e7ec4e813ba6
511 511 pushing to other
512 512 searching for changes
513 513 abort: Abandon ship!
514 514 (don't panic)
515 515 [255]
516 516
517 517 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
518 518 pushing to ssh://user@dummy/other
519 519 searching for changes
520 520 remote: Abandon ship!
521 remote: (don't panic)
521 522 abort: push failed on remote
522 (don't panic)
523 523 [255]
524 524
525 525 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
526 526 pushing to http://localhost:$HGPORT2/
527 527 searching for changes
528 528 remote: Abandon ship!
529 remote: (don't panic)
529 530 abort: push failed on remote
530 (don't panic)
531 531 [255]
532 532
533 533
534 534 Doing the actual push: unknown mandatory parts
535 535
536 536 $ cat << EOF >> $HGRCPATH
537 537 > [failpush]
538 538 > reason = unknown
539 539 > EOF
540 540
541 541 $ hg -R main push other -r e7ec4e813ba6
542 542 pushing to other
543 543 searching for changes
544 544 abort: missing support for test:unknown
545 545 [255]
546 546
547 547 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
548 548 pushing to ssh://user@dummy/other
549 549 searching for changes
550 550 abort: missing support for test:unknown
551 551 [255]
552 552
553 553 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
554 554 pushing to http://localhost:$HGPORT2/
555 555 searching for changes
556 556 abort: missing support for test:unknown
557 557 [255]
558 558
559 559 Doing the actual push: race
560 560
561 561 $ cat << EOF >> $HGRCPATH
562 562 > [failpush]
563 563 > reason = race
564 564 > EOF
565 565
566 566 $ hg -R main push other -r e7ec4e813ba6
567 567 pushing to other
568 568 searching for changes
569 569 abort: push failed:
570 570 'repository changed while pushing - please try again'
571 571 [255]
572 572
573 573 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
574 574 pushing to ssh://user@dummy/other
575 575 searching for changes
576 576 abort: push failed:
577 577 'repository changed while pushing - please try again'
578 578 [255]
579 579
580 580 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
581 581 pushing to http://localhost:$HGPORT2/
582 582 searching for changes
583 583 abort: push failed:
584 584 'repository changed while pushing - please try again'
585 585 [255]
586 586
587 587 Doing the actual push: hook abort
588 588
589 589 $ cat << EOF >> $HGRCPATH
590 590 > [failpush]
591 591 > reason =
592 592 > [hooks]
593 593 > pretxnclose.failpush = sh -c "echo 'You shall not pass!'; false"
594 594 > txnabort.failpush = sh -c "echo 'Cleaning up the mess...'"
595 595 > EOF
596 596
597 597 $ killdaemons.py
598 598 $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
599 599 $ cat other.pid >> $DAEMON_PIDS
600 600
601 601 $ hg -R main push other -r e7ec4e813ba6
602 602 pushing to other
603 603 searching for changes
604 604 remote: adding changesets
605 605 remote: adding manifests
606 606 remote: adding file changes
607 607 remote: added 1 changesets with 1 changes to 1 files
608 608 remote: pre-close-tip:e7ec4e813ba6 draft
609 609 remote: You shall not pass!
610 610 remote: transaction abort!
611 611 remote: Cleaning up the mess...
612 612 remote: rollback completed
613 613 abort: pretxnclose.failpush hook exited with status 1
614 614 [255]
615 615
616 616 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
617 617 pushing to ssh://user@dummy/other
618 618 searching for changes
619 619 remote: adding changesets
620 620 remote: adding manifests
621 621 remote: adding file changes
622 622 remote: added 1 changesets with 1 changes to 1 files
623 623 remote: pre-close-tip:e7ec4e813ba6 draft
624 624 remote: You shall not pass!
625 625 remote: transaction abort!
626 626 remote: Cleaning up the mess...
627 627 remote: rollback completed
628 628 remote: pretxnclose.failpush hook exited with status 1
629 629 abort: push failed on remote
630 630 [255]
631 631
632 632 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
633 633 pushing to http://localhost:$HGPORT2/
634 634 searching for changes
635 635 remote: adding changesets
636 636 remote: adding manifests
637 637 remote: adding file changes
638 638 remote: added 1 changesets with 1 changes to 1 files
639 639 remote: pre-close-tip:e7ec4e813ba6 draft
640 640 remote: You shall not pass!
641 641 remote: transaction abort!
642 642 remote: Cleaning up the mess...
643 643 remote: rollback completed
644 644 remote: pretxnclose.failpush hook exited with status 1
645 645 abort: push failed on remote
646 646 [255]
647 647
648 648 (check that no 'pending' files remain)
649 649
650 650 $ ls -1 other/.hg/bookmarks*
651 651 other/.hg/bookmarks
652 652 $ ls -1 other/.hg/store/phaseroots*
653 653 other/.hg/store/phaseroots
654 654 $ ls -1 other/.hg/store/00changelog.i*
655 655 other/.hg/store/00changelog.i
656 656
657 657 Check error from hook during the unbundling process itself
658 658
659 659 $ cat << EOF >> $HGRCPATH
660 660 > pretxnchangegroup = sh -c "echo 'Fail early!'; false"
661 661 > EOF
662 662 $ killdaemons.py # reload http config
663 663 $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
664 664 $ cat other.pid >> $DAEMON_PIDS
665 665
666 666 $ hg -R main push other -r e7ec4e813ba6
667 667 pushing to other
668 668 searching for changes
669 669 remote: adding changesets
670 670 remote: adding manifests
671 671 remote: adding file changes
672 672 remote: added 1 changesets with 1 changes to 1 files
673 673 remote: Fail early!
674 674 remote: transaction abort!
675 675 remote: Cleaning up the mess...
676 676 remote: rollback completed
677 677 abort: pretxnchangegroup hook exited with status 1
678 678 [255]
679 679 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
680 680 pushing to ssh://user@dummy/other
681 681 searching for changes
682 682 remote: adding changesets
683 683 remote: adding manifests
684 684 remote: adding file changes
685 685 remote: added 1 changesets with 1 changes to 1 files
686 686 remote: Fail early!
687 687 remote: transaction abort!
688 688 remote: Cleaning up the mess...
689 689 remote: rollback completed
690 690 remote: pretxnchangegroup hook exited with status 1
691 691 abort: push failed on remote
692 692 [255]
693 693 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
694 694 pushing to http://localhost:$HGPORT2/
695 695 searching for changes
696 696 remote: adding changesets
697 697 remote: adding manifests
698 698 remote: adding file changes
699 699 remote: added 1 changesets with 1 changes to 1 files
700 700 remote: Fail early!
701 701 remote: transaction abort!
702 702 remote: Cleaning up the mess...
703 703 remote: rollback completed
704 704 remote: pretxnchangegroup hook exited with status 1
705 705 abort: push failed on remote
706 706 [255]
707 707
708 708 Check output capture control.
709 709
710 710 (should still be forced for http, disabled for local and ssh)
711 711
712 712 $ cat >> $HGRCPATH << EOF
713 713 > [experimental]
714 714 > bundle2-output-capture=False
715 715 > EOF
716 716
717 717 $ hg -R main push other -r e7ec4e813ba6
718 718 pushing to other
719 719 searching for changes
720 720 adding changesets
721 721 adding manifests
722 722 adding file changes
723 723 added 1 changesets with 1 changes to 1 files
724 724 Fail early!
725 725 transaction abort!
726 726 Cleaning up the mess...
727 727 rollback completed
728 728 abort: pretxnchangegroup hook exited with status 1
729 729 [255]
730 730 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
731 731 pushing to ssh://user@dummy/other
732 732 searching for changes
733 733 remote: adding changesets
734 734 remote: adding manifests
735 735 remote: adding file changes
736 736 remote: added 1 changesets with 1 changes to 1 files
737 737 remote: Fail early!
738 738 remote: transaction abort!
739 739 remote: Cleaning up the mess...
740 740 remote: rollback completed
741 741 remote: pretxnchangegroup hook exited with status 1
742 742 abort: push failed on remote
743 743 [255]
744 744 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
745 745 pushing to http://localhost:$HGPORT2/
746 746 searching for changes
747 747 remote: adding changesets
748 748 remote: adding manifests
749 749 remote: adding file changes
750 750 remote: added 1 changesets with 1 changes to 1 files
751 751 remote: Fail early!
752 752 remote: transaction abort!
753 753 remote: Cleaning up the mess...
754 754 remote: rollback completed
755 755 remote: pretxnchangegroup hook exited with status 1
756 756 abort: push failed on remote
757 757 [255]
758 758
759 759 Check abort from mandatory pushkey
760 760
  $ cat > mandatorypart.py << EOF
  > from mercurial import exchange
  > from mercurial import pushkey
  > from mercurial import node
  > from mercurial import error
  > @exchange.b2partsgenerator('failingpushkey')
  > def addfailingpushkey(pushop, bundler):
  >     enc = pushkey.encode
  >     part = bundler.newpart('pushkey')
  >     part.addparam('namespace', enc('phases'))
  >     part.addparam('key', enc(pushop.repo['cd010b8cd998'].hex()))
  >     part.addparam('old', enc(str(0))) # successful update
  >     part.addparam('new', enc(str(0)))
  >     def fail(pushop, exc):
  >         raise error.Abort('Correct phase push failed (because hooks)')
  >     pushop.pkfailcb[part.id] = fail
  > EOF
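
(The extension above registers a bundle2 parts generator that adds a
mandatory 'pushkey' part to every push. 'pushop.pkfailcb' maps a part id to
a callback run on the client when the remote reports that part as failed,
which is what produces the custom abort messages in the transcripts below.)
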
  $ cat >> $HGRCPATH << EOF
  > [hooks]
  > pretxnchangegroup=
  > pretxnclose.failpush=
  > prepushkey.failpush = sh -c "echo 'do not push the key !'; false"
  > [extensions]
  > mandatorypart=$TESTTMP/mandatorypart.py
  > EOF
  $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS # reload http config
  $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
  $ cat other.pid >> $DAEMON_PIDS

(Failure from a hook)

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  do not push the key !
  pushkey-abort: prepushkey.failpush hook exited with status 1
  transaction abort!
  Cleaning up the mess...
  rollback completed
  abort: Correct phase push failed (because hooks)
  [255]
  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  remote: do not push the key !
  remote: pushkey-abort: prepushkey.failpush hook exited with status 1
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  abort: Correct phase push failed (because hooks)
  [255]
  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  remote: do not push the key !
  remote: pushkey-abort: prepushkey.failpush hook exited with status 1
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  abort: Correct phase push failed (because hooks)
  [255]

(Failure from the pushkey)

  $ cat > mandatorypart.py << EOF
  > from mercurial import exchange
  > from mercurial import pushkey
  > from mercurial import node
  > from mercurial import error
  > @exchange.b2partsgenerator('failingpushkey')
  > def addfailingpushkey(pushop, bundler):
  >     enc = pushkey.encode
  >     part = bundler.newpart('pushkey')
  >     part.addparam('namespace', enc('phases'))
  >     part.addparam('key', enc(pushop.repo['cd010b8cd998'].hex()))
  >     part.addparam('old', enc(str(4))) # will fail
  >     part.addparam('new', enc(str(3)))
  >     def fail(pushop, exc):
  >         raise error.Abort('Clown phase push failed')
  >     pushop.pkfailcb[part.id] = fail
  > EOF
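
(The 'old'/'new' parameters encode phase numbers. For reference, a minimal
sketch of the defined constants; illustrative only, not executed by the
test:)

from mercurial import phases
# public=0, draft=1 and secret=2 are the only defined phases, so the
# old=4 / new=3 pair sent above cannot match any real phase and the
# pushkey update fails on the remote itself rather than in a hook.
assert (phases.public, phases.draft, phases.secret) == (0, 1, 2)
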
  $ cat >> $HGRCPATH << EOF
  > [hooks]
  > prepushkey.failpush =
  > EOF
  $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS # reload http config
  $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
  $ cat other.pid >> $DAEMON_PIDS

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  transaction abort!
  Cleaning up the mess...
  rollback completed
  pushkey: lock state after "phases"
  lock: free
  wlock: free
  abort: Clown phase push failed
  [255]
  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  remote: pushkey: lock state after "phases"
  remote: lock: free
  remote: wlock: free
  abort: Clown phase push failed
  [255]
  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  remote: pushkey: lock state after "phases"
  remote: lock: free
  remote: wlock: free
  abort: Clown phase push failed
  [255]

Test lazily acquiring the lock during unbundle

  $ cp $TESTTMP/hgrc.orig $HGRCPATH
  $ cat >> $HGRCPATH <<EOF
  > [ui]
  > ssh=python "$TESTDIR/dummyssh"
  > EOF

  $ cat >> $TESTTMP/locktester.py <<EOF
  > import os
  > from mercurial import extensions, bundle2, util
  > def checklock(orig, repo, *args, **kwargs):
  >     if repo.svfs.lexists("lock"):
  >         raise util.Abort("Lock should not be taken")
  >     return orig(repo, *args, **kwargs)
  > def extsetup(ui):
  >     extensions.wrapfunction(bundle2, 'processbundle', checklock)
  > EOF
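
(The wrapper installed above checks that the store lock file does not yet
exist when 'processbundle' starts. Without lazy locking the server takes
the lock before processing and the first push below aborts; once
'experimental.bundle2lazylocking' is enabled the lock is deferred until a
part actually needs it, so the same push succeeds.)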

  $ hg init lazylock
  $ cat >> lazylock/.hg/hgrc <<EOF
  > [extensions]
  > locktester=$TESTTMP/locktester.py
  > EOF

  $ hg clone -q ssh://user@dummy/lazylock lazylockclient
  $ cd lazylockclient
  $ touch a && hg ci -Aqm a
  $ hg push
  pushing to ssh://user@dummy/lazylock
  searching for changes
  remote: Lock should not be taken
  abort: push failed on remote
  [255]

  $ cat >> ../lazylock/.hg/hgrc <<EOF
  > [experimental]
  > bundle2lazylocking=True
  > EOF
  $ hg push
  pushing to ssh://user@dummy/lazylock
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files

  $ cd ..

Servers can disable bundle1 for clone/pull operations

  $ killdaemons.py
  $ hg init bundle2onlyserver
  $ cd bundle2onlyserver
  $ cat > .hg/hgrc << EOF
  > [server]
  > bundle1.pull = false
  > EOF
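
(This knob is one of a small family of server options restricting bundle1.
A combined hgrc example for reference, limited to the variants exercised in
this file:)

[server]
bundle1 = false        # refuse bundle1 for all exchanges
bundle1.pull = false   # refuse bundle1 for pull/clone only
bundle1.push = false   # refuse bundle1 for push only
bundle1gd = false      # same as bundle1, but only for generaldelta repos
bundle1gd.pull = false # same as bundle1.pull, for generaldelta repos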

  $ touch foo
  $ hg -q commit -A -m initial

  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS

  $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2
  requesting all changes
  abort: remote error:
  incompatible Mercurial client; bundle2 required
  (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
  [255]
  $ killdaemons.py
  $ cd ..

bundle1 can still pull non-generaldelta repos when generaldelta bundle1 is disabled

  $ hg --config format.usegeneraldelta=false init notgdserver
  $ cd notgdserver
  $ cat > .hg/hgrc << EOF
  > [server]
  > bundle1gd.pull = false
  > EOF

  $ touch foo
  $ hg -q commit -A -m initial
  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS

  $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2-1
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ killdaemons.py
  $ cd ../bundle2onlyserver

bundle1 pull can be disabled for generaldelta repos only

  $ cat > .hg/hgrc << EOF
  > [server]
  > bundle1gd.pull = false
  > EOF

  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS
  $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2
  requesting all changes
  abort: remote error:
  incompatible Mercurial client; bundle2 required
  (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
  [255]

  $ killdaemons.py

Verify the global server.bundle1 option works

  $ cat > .hg/hgrc << EOF
  > [server]
  > bundle1 = false
  > EOF
  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS
  $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT not-bundle2
  requesting all changes
  abort: remote error:
  incompatible Mercurial client; bundle2 required
  (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
  [255]
  $ killdaemons.py

  $ cat > .hg/hgrc << EOF
  > [server]
  > bundle1gd = false
  > EOF
  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS

  $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2
  requesting all changes
  abort: remote error:
  incompatible Mercurial client; bundle2 required
  (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
  [255]

  $ killdaemons.py

  $ cd ../notgdserver
  $ cat > .hg/hgrc << EOF
  > [server]
  > bundle1gd = false
  > EOF
  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS

  $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2-2
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ killdaemons.py
  $ cd ../bundle2onlyserver

Verify bundle1 pushes can be disabled

  $ cat > .hg/hgrc << EOF
  > [server]
  > bundle1.push = false
  > [web]
  > allow_push = *
  > push_ssl = false
  > EOF
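
(allow_push = * permits pushes from any user and push_ssl = false allows
pushing over plain HTTP, which the test server requires.)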

  $ hg serve -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid >> $DAEMON_PIDS
  $ cd ..

  $ hg clone http://localhost:$HGPORT bundle2-only
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd bundle2-only
  $ echo commit > foo
  $ hg commit -m commit
  $ hg --config devel.legacy.exchange=bundle1 push
  pushing to http://localhost:$HGPORT/
  searching for changes
  abort: remote error:
  incompatible Mercurial client; bundle2 required
  (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
  [255]

  $ hg push
  pushing to http://localhost:$HGPORT/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files