clonebundle: support bundle2...
Gregory Szorc
r26643:d2e16419 default
@@ -1,1668 +1,1672 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib, urllib2
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13 import lock as lockmod
14 14 import streamclone
15 15 import tags
16 16 import url as urlmod
17 17
18 18 # Maps bundle compression human names to internal representation.
19 19 _bundlespeccompressions = {'none': None,
20 20 'bzip2': 'BZ',
21 21 'gzip': 'GZ',
22 22 }
23 23
24 24 # Maps bundle version human names to changegroup versions.
25 25 _bundlespeccgversions = {'v1': '01',
26 26 'v2': '02',
27 27 'bundle2': '02', #legacy
28 28 }
29 29
30 30 def parsebundlespec(repo, spec, strict=True):
31 31 """Parse a bundle string specification into parts.
32 32
33 33 Bundle specifications denote a well-defined bundle/exchange format.
34 34 The content of a given specification should not change over time in
35 35 order to ensure that bundles produced by a newer version of Mercurial are
36 36 readable from an older version.
37 37
38 38 The string currently has the form:
39 39
40 40 <compression>-<type>
41 41
42 42 Where <compression> is one of the supported compression formats
43 43 and <type> is (currently) a version string.
44 44
45 45 If ``strict`` is True (the default) <compression> is required. Otherwise,
46 46 it is optional.
47 47
48 48 Returns a 2-tuple of (compression, version). Compression will be ``None``
49 49 if not in strict mode and a compression isn't defined.
50 50
51 51 An ``InvalidBundleSpecification`` is raised when the specification is
52 52 not syntactically well formed.
53 53
54 54 An ``UnsupportedBundleSpecification`` is raised when the compression or
55 55 bundle type/version is not recognized.
56 56
57 57 Note: this function will likely eventually return a more complex data
58 58 structure, including bundle2 part information.
59 59 """
60 60 if strict and '-' not in spec:
61 61 raise error.InvalidBundleSpecification(
62 62 _('invalid bundle specification; '
63 63 'must be prefixed with compression: %s') % spec)
64 64
65 65 if '-' in spec:
66 66 compression, version = spec.split('-', 1)
67 67
68 68 if compression not in _bundlespeccompressions:
69 69 raise error.UnsupportedBundleSpecification(
70 70 _('%s compression is not supported') % compression)
71 71
72 72 if version not in _bundlespeccgversions:
73 73 raise error.UnsupportedBundleSpecification(
74 74 _('%s is not a recognized bundle version') % version)
75 75 else:
76 76 # Value could be just the compression or just the version, in which
77 77 # case some defaults are assumed (but only when not in strict mode).
78 78 assert not strict
79 79
80 80 if spec in _bundlespeccompressions:
81 81 compression = spec
82 82 version = 'v1'
83 83 if 'generaldelta' in repo.requirements:
84 84 version = 'v2'
85 85 elif spec in _bundlespeccgversions:
86 86 compression = 'bzip2'
87 87 version = spec
88 88 else:
89 89 raise error.UnsupportedBundleSpecification(
90 90 _('%s is not a recognized bundle specification') % spec)
91 91
92 92 compression = _bundlespeccompressions[compression]
93 93 version = _bundlespeccgversions[version]
94 94 return compression, version
95 95
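# Example (illustrative sketch): the tables above translate a spec
# string into internal identifiers, e.g.:
#
#     parsebundlespec(repo, 'gzip-v1')           -> ('GZ', '01')
#     parsebundlespec(repo, 'none-v2')           -> (None, '02')
#     parsebundlespec(repo, 'v2', strict=False)  -> ('BZ', '02')
#
# In non-strict mode, a bare compression name implies v1 (v2 on
# generaldelta repos) and a bare version implies bzip2 compression.
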
96 96 def readbundle(ui, fh, fname, vfs=None):
97 97 header = changegroup.readexactly(fh, 4)
98 98
99 99 alg = None
100 100 if not fname:
101 101 fname = "stream"
102 102 if not header.startswith('HG') and header.startswith('\0'):
103 103 fh = changegroup.headerlessfixup(fh, header)
104 104 header = "HG10"
105 105 alg = 'UN'
106 106 elif vfs:
107 107 fname = vfs.join(fname)
108 108
109 109 magic, version = header[0:2], header[2:4]
110 110
111 111 if magic != 'HG':
112 112 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
113 113 if version == '10':
114 114 if alg is None:
115 115 alg = changegroup.readexactly(fh, 2)
116 116 return changegroup.cg1unpacker(fh, alg)
117 117 elif version.startswith('2'):
118 118 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
119 119 else:
120 120 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
121 121
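# Example (illustrative sketch; 'bundle.hg' is a hypothetical file):
# readbundle sniffs the four-byte magic to pick an unpacker:
#
#     fh = open('bundle.hg', 'rb')
#     gen = readbundle(ui, fh, 'bundle.hg')  # cg1unpacker or bundle2 unbundler
#
# A stream starting with '\0' is treated as a headerless, uncompressed
# HG10 bundle (compression 'UN').
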
122 122 def buildobsmarkerspart(bundler, markers):
123 123 """add an obsmarker part to the bundler with <markers>
124 124
125 125 No part is created if markers is empty.
126 126 Raises ValueError if the bundler doesn't support any known obsmarker format.
127 127 """
128 128 if markers:
129 129 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
130 130 version = obsolete.commonversion(remoteversions)
131 131 if version is None:
132 132 raise ValueError('bundler does not support common obsmarker format')
133 133 stream = obsolete.encodemarkers(markers, True, version=version)
134 134 return bundler.newpart('obsmarkers', data=stream)
135 135 return None
136 136
137 137 def _canusebundle2(op):
138 138 """return true if a pull/push can use bundle2
139 139
140 140 Feel free to nuke this function when we drop the experimental option"""
141 141 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
142 142 and op.remote.capable('bundle2'))
143 143
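# Example (illustrative sketch): while the option is experimental,
# bundle2 can be disabled per invocation, falling back to the legacy
# exchange code paths below:
#
#     hg push --config experimental.bundle2-exp=False
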
144 144
145 145 class pushoperation(object):
146 146 """A object that represent a single push operation
147 147
148 148 It purpose is to carry push related state and very common operation.
149 149
150 150 A new should be created at the beginning of each push and discarded
151 151 afterward.
152 152 """
153 153
154 154 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
155 155 bookmarks=()):
156 156 # repo we push from
157 157 self.repo = repo
158 158 self.ui = repo.ui
159 159 # repo we push to
160 160 self.remote = remote
161 161 # force option provided
162 162 self.force = force
163 163 # revs to be pushed (None is "all")
164 164 self.revs = revs
166 166 # bookmarks explicitly pushed
166 166 self.bookmarks = bookmarks
167 167 # allow push of new branch
168 168 self.newbranch = newbranch
169 169 # did a local lock get acquired?
170 170 self.locallocked = None
171 171 # steps already performed
172 172 # (used to check what steps have already been performed through bundle2)
173 173 self.stepsdone = set()
174 174 # Integer version of the changegroup push result
175 175 # - None means nothing to push
176 176 # - 0 means HTTP error
177 177 # - 1 means we pushed and remote head count is unchanged *or*
178 178 # we have outgoing changesets but refused to push
179 179 # - other values as described by addchangegroup()
180 180 self.cgresult = None
181 181 # Boolean value for the bookmark push
182 182 self.bkresult = None
183 183 # discover.outgoing object (contains common and outgoing data)
184 184 self.outgoing = None
185 185 # all remote heads before the push
186 186 self.remoteheads = None
187 187 # testable as a boolean indicating if any nodes are missing locally.
188 188 self.incoming = None
189 189 # phase changes that must be pushed alongside the changesets
190 190 self.outdatedphases = None
191 191 # phase changes that must be pushed if the changeset push fails
192 192 self.fallbackoutdatedphases = None
193 193 # outgoing obsmarkers
194 194 self.outobsmarkers = set()
195 195 # outgoing bookmarks
196 196 self.outbookmarks = []
197 197 # transaction manager
198 198 self.trmanager = None
199 199 # map { pushkey partid -> callback handling failure}
200 200 # used to handle exceptions from mandatory pushkey part failures
201 201 self.pkfailcb = {}
202 202
203 203 @util.propertycache
204 204 def futureheads(self):
205 205 """future remote heads if the changeset push succeeds"""
206 206 return self.outgoing.missingheads
207 207
208 208 @util.propertycache
209 209 def fallbackheads(self):
210 210 """future remote heads if the changeset push fails"""
211 211 if self.revs is None:
212 212 # no target to push, all common heads are relevant
213 213 return self.outgoing.commonheads
214 214 unfi = self.repo.unfiltered()
215 215 # I want cheads = heads(::missingheads and ::commonheads)
216 216 # (missingheads is revs with secret changeset filtered out)
217 217 #
218 218 # This can be expressed as:
219 219 # cheads = ( (missingheads and ::commonheads)
220 220 # + (commonheads and ::missingheads))
221 221 #
222 222 #
223 223 # while trying to push we already computed the following:
224 224 # common = (::commonheads)
225 225 # missing = ((commonheads::missingheads) - commonheads)
226 226 #
227 227 # We can pick:
228 228 # * missingheads part of common (::commonheads)
229 229 common = self.outgoing.common
230 230 nm = self.repo.changelog.nodemap
231 231 cheads = [node for node in self.revs if nm[node] in common]
232 232 # and
233 233 # * commonheads parents on missing
234 234 revset = unfi.set('%ln and parents(roots(%ln))',
235 235 self.outgoing.commonheads,
236 236 self.outgoing.missing)
237 237 cheads.extend(c.node() for c in revset)
238 238 return cheads
239 239
240 240 @property
241 241 def commonheads(self):
242 242 """set of all common heads after changeset bundle push"""
243 243 if self.cgresult:
244 244 return self.futureheads
245 245 else:
246 246 return self.fallbackheads
247 247
248 248 # mapping of messages used when pushing bookmarks
249 249 bookmsgmap = {'update': (_("updating bookmark %s\n"),
250 250 _('updating bookmark %s failed!\n')),
251 251 'export': (_("exporting bookmark %s\n"),
252 252 _('exporting bookmark %s failed!\n')),
253 253 'delete': (_("deleting remote bookmark %s\n"),
254 254 _('deleting remote bookmark %s failed!\n')),
255 255 }
256 256
257 257
258 258 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
259 259 '''Push outgoing changesets (limited by revs) from a local
260 260 repository to remote. Return an integer:
261 261 - None means nothing to push
262 262 - 0 means HTTP error
263 263 - 1 means we pushed and remote head count is unchanged *or*
264 264 we have outgoing changesets but refused to push
265 265 - other values as described by addchangegroup()
266 266 '''
267 267 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
268 268 if pushop.remote.local():
269 269 missing = (set(pushop.repo.requirements)
270 270 - pushop.remote.local().supported)
271 271 if missing:
272 272 msg = _("required features are not"
273 273 " supported in the destination:"
274 274 " %s") % (', '.join(sorted(missing)))
275 275 raise error.Abort(msg)
276 276
277 277 # there are two ways to push to remote repo:
278 278 #
279 279 # addchangegroup assumes local user can lock remote
280 280 # repo (local filesystem, old ssh servers).
281 281 #
282 282 # unbundle assumes local user cannot lock remote repo (new ssh
283 283 # servers, http servers).
284 284
285 285 if not pushop.remote.canpush():
286 286 raise error.Abort(_("destination does not support push"))
287 287 # get local lock as we might write phase data
288 288 localwlock = locallock = None
289 289 try:
290 290 # bundle2 push may receive a reply bundle touching bookmarks or other
291 291 # things requiring the wlock. Take it now to ensure proper ordering.
292 292 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
293 293 if _canusebundle2(pushop) and maypushback:
294 294 localwlock = pushop.repo.wlock()
295 295 locallock = pushop.repo.lock()
296 296 pushop.locallocked = True
297 297 except IOError as err:
298 298 pushop.locallocked = False
299 299 if err.errno != errno.EACCES:
300 300 raise
301 301 # source repo cannot be locked.
302 302 # We do not abort the push, but just disable the local phase
303 303 # synchronisation.
304 304 msg = 'cannot lock source repository: %s\n' % err
305 305 pushop.ui.debug(msg)
306 306 try:
307 307 if pushop.locallocked:
308 308 pushop.trmanager = transactionmanager(repo,
309 309 'push-response',
310 310 pushop.remote.url())
311 311 pushop.repo.checkpush(pushop)
312 312 lock = None
313 313 unbundle = pushop.remote.capable('unbundle')
314 314 if not unbundle:
315 315 lock = pushop.remote.lock()
316 316 try:
317 317 _pushdiscovery(pushop)
318 318 if _canusebundle2(pushop):
319 319 _pushbundle2(pushop)
320 320 _pushchangeset(pushop)
321 321 _pushsyncphase(pushop)
322 322 _pushobsolete(pushop)
323 323 _pushbookmark(pushop)
324 324 finally:
325 325 if lock is not None:
326 326 lock.release()
327 327 if pushop.trmanager:
328 328 pushop.trmanager.close()
329 329 finally:
330 330 if pushop.trmanager:
331 331 pushop.trmanager.release()
332 332 if locallock is not None:
333 333 locallock.release()
334 334 if localwlock is not None:
335 335 localwlock.release()
336 336
337 337 return pushop
338 338
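# Example (illustrative sketch; ``node`` and the bookmark name are
# hypothetical): callers inspect the returned pushoperation for results:
#
#     pushop = push(repo, remote, revs=[node], bookmarks=('mybook',))
#     if pushop.cgresult == 0:
#         repo.ui.warn('push failed (HTTP error)\n')
#
# cgresult follows the integer convention documented on pushoperation.
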
339 339 # list of steps to perform discovery before push
340 340 pushdiscoveryorder = []
341 341
342 342 # Mapping between step name and function
343 343 #
344 344 # This exists to help extensions wrap steps if necessary
345 345 pushdiscoverymapping = {}
346 346
347 347 def pushdiscovery(stepname):
348 348 """decorator for function performing discovery before push
349 349
350 350 The function is added to the step -> function mapping and appended to the
351 351 list of steps. Beware that decorated function will be added in order (this
352 352 may matter).
353 353
354 354 You can only use this decorator for a new step, if you want to wrap a step
355 355 from an extension, change the pushdiscovery dictionary directly."""
356 356 def dec(func):
357 357 assert stepname not in pushdiscoverymapping
358 358 pushdiscoverymapping[stepname] = func
359 359 pushdiscoveryorder.append(stepname)
360 360 return func
361 361 return dec
362 362
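# Example (illustrative sketch; 'mystep' is a hypothetical step name):
# new discovery steps register themselves and run in registration order:
#
#     @pushdiscovery('mystep')
#     def _pushdiscoverymystep(pushop):
#         pushop.ui.debug('running my discovery step\n')
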
363 363 def _pushdiscovery(pushop):
364 364 """Run all discovery steps"""
365 365 for stepname in pushdiscoveryorder:
366 366 step = pushdiscoverymapping[stepname]
367 367 step(pushop)
368 368
369 369 @pushdiscovery('changeset')
370 370 def _pushdiscoverychangeset(pushop):
371 371 """discover the changeset that need to be pushed"""
372 372 fci = discovery.findcommonincoming
373 373 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
374 374 common, inc, remoteheads = commoninc
375 375 fco = discovery.findcommonoutgoing
376 376 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
377 377 commoninc=commoninc, force=pushop.force)
378 378 pushop.outgoing = outgoing
379 379 pushop.remoteheads = remoteheads
380 380 pushop.incoming = inc
381 381
382 382 @pushdiscovery('phase')
383 383 def _pushdiscoveryphase(pushop):
384 384 """discover the phase that needs to be pushed
385 385
386 386 (computed for both success and failure case for changesets push)"""
387 387 outgoing = pushop.outgoing
388 388 unfi = pushop.repo.unfiltered()
389 389 remotephases = pushop.remote.listkeys('phases')
390 390 publishing = remotephases.get('publishing', False)
391 391 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
392 392 and remotephases # server supports phases
393 393 and not pushop.outgoing.missing # no changesets to be pushed
394 394 and publishing):
395 395 # When:
396 396 # - this is a subrepo push
397 397 # - and the remote supports phases
398 398 # - and no changesets are to be pushed
399 399 # - and the remote is publishing
400 400 # We may be in the issue 3871 case!
401 401 # We drop the possible phase synchronisation done as a
402 402 # courtesy to publish changesets that are possibly draft
403 403 # locally on the remote.
404 404 remotephases = {'publishing': 'True'}
405 405 ana = phases.analyzeremotephases(pushop.repo,
406 406 pushop.fallbackheads,
407 407 remotephases)
408 408 pheads, droots = ana
409 409 extracond = ''
410 410 if not publishing:
411 411 extracond = ' and public()'
412 412 revset = 'heads((%%ln::%%ln) %s)' % extracond
413 413 # Get the list of all revs draft on the remote but public here.
414 414 # XXX Beware that the revset breaks if droots is not strictly
415 415 # XXX roots; we may want to ensure it is, but that is costly.
416 416 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
417 417 if not outgoing.missing:
418 418 future = fallback
419 419 else:
420 420 # add the changesets we are going to push as draft
421 421 #
422 422 # This should not be necessary for a publishing server, but
423 423 # because of an issue fixed in xxxxx we have to do it anyway.
424 424 fdroots = list(unfi.set('roots(%ln + %ln::)',
425 425 outgoing.missing, droots))
426 426 fdroots = [f.node() for f in fdroots]
427 427 future = list(unfi.set(revset, fdroots, pushop.futureheads))
428 428 pushop.outdatedphases = future
429 429 pushop.fallbackoutdatedphases = fallback
430 430
431 431 @pushdiscovery('obsmarker')
432 432 def _pushdiscoveryobsmarkers(pushop):
433 433 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
434 434 and pushop.repo.obsstore
435 435 and 'obsolete' in pushop.remote.listkeys('namespaces')):
436 436 repo = pushop.repo
437 437 # very naive computation that can be quite expensive on big repos;
438 438 # however, evolution is currently slow on them anyway.
439 439 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
440 440 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
441 441
442 442 @pushdiscovery('bookmarks')
443 443 def _pushdiscoverybookmarks(pushop):
444 444 ui = pushop.ui
445 445 repo = pushop.repo.unfiltered()
446 446 remote = pushop.remote
447 447 ui.debug("checking for updated bookmarks\n")
448 448 ancestors = ()
449 449 if pushop.revs:
450 450 revnums = map(repo.changelog.rev, pushop.revs)
451 451 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
452 452 remotebookmark = remote.listkeys('bookmarks')
453 453
454 454 explicit = set(pushop.bookmarks)
455 455
456 456 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
457 457 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
458 458 for b, scid, dcid in advsrc:
459 459 if b in explicit:
460 460 explicit.remove(b)
461 461 if not ancestors or repo[scid].rev() in ancestors:
462 462 pushop.outbookmarks.append((b, dcid, scid))
463 463 # search for added bookmarks
464 464 for b, scid, dcid in addsrc:
465 465 if b in explicit:
466 466 explicit.remove(b)
467 467 pushop.outbookmarks.append((b, '', scid))
468 468 # search for overwritten bookmarks
469 469 for b, scid, dcid in advdst + diverge + differ:
470 470 if b in explicit:
471 471 explicit.remove(b)
472 472 pushop.outbookmarks.append((b, dcid, scid))
473 473 # search for bookmarks to delete
474 474 for b, scid, dcid in adddst:
475 475 if b in explicit:
476 476 explicit.remove(b)
477 477 # treat as "deleted locally"
478 478 pushop.outbookmarks.append((b, dcid, ''))
479 479 # identical bookmarks shouldn't get reported
480 480 for b, scid, dcid in same:
481 481 if b in explicit:
482 482 explicit.remove(b)
483 483
484 484 if explicit:
485 485 explicit = sorted(explicit)
486 486 # we should probably list all of them
487 487 ui.warn(_('bookmark %s does not exist on the local '
488 488 'or remote repository!\n') % explicit[0])
489 489 pushop.bkresult = 2
490 490
491 491 pushop.outbookmarks.sort()
492 492
493 493 def _pushcheckoutgoing(pushop):
494 494 outgoing = pushop.outgoing
495 495 unfi = pushop.repo.unfiltered()
496 496 if not outgoing.missing:
497 497 # nothing to push
498 498 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
499 499 return False
500 500 # something to push
501 501 if not pushop.force:
502 502 # if repo.obsstore is empty --> there is nothing obsolete,
503 503 # so we can save the iteration below
504 504 if unfi.obsstore:
505 505 # these messages are defined here because of the 80-char line limit
506 506 mso = _("push includes obsolete changeset: %s!")
507 507 mst = {"unstable": _("push includes unstable changeset: %s!"),
508 508 "bumped": _("push includes bumped changeset: %s!"),
509 509 "divergent": _("push includes divergent changeset: %s!")}
510 510 # If we are about to push and there is at least one
511 511 # obsolete or unstable changeset in missing, at
512 512 # least one of the missingheads will be obsolete or
513 513 # unstable. So checking heads only is ok.
514 514 for node in outgoing.missingheads:
515 515 ctx = unfi[node]
516 516 if ctx.obsolete():
517 517 raise error.Abort(mso % ctx)
518 518 elif ctx.troubled():
519 519 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
520 520
521 521 # internal config: bookmarks.pushing
522 522 newbm = pushop.ui.configlist('bookmarks', 'pushing')
523 523 discovery.checkheads(unfi, pushop.remote, outgoing,
524 524 pushop.remoteheads,
525 525 pushop.newbranch,
526 526 bool(pushop.incoming),
527 527 newbm)
528 528 return True
529 529
530 530 # List of names of steps to perform for an outgoing bundle2, order matters.
531 531 b2partsgenorder = []
532 532
533 533 # Mapping between step name and function
534 534 #
535 535 # This exists to help extensions wrap steps if necessary
536 536 b2partsgenmapping = {}
537 537
538 538 def b2partsgenerator(stepname, idx=None):
539 539 """decorator for function generating bundle2 part
540 540
541 541 The function is added to the step -> function mapping and appended to the
542 542 list of steps. Beware that decorated functions will be added in order
543 543 (this may matter).
544 544
545 545 You can only use this decorator for new steps, if you want to wrap a step
546 546 from an extension, attack the b2partsgenmapping dictionary directly."""
547 547 def dec(func):
548 548 assert stepname not in b2partsgenmapping
549 549 b2partsgenmapping[stepname] = func
550 550 if idx is None:
551 551 b2partsgenorder.append(stepname)
552 552 else:
553 553 b2partsgenorder.insert(idx, stepname)
554 554 return func
555 555 return dec
556 556
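# Example (illustrative sketch; 'mypart' is a hypothetical step name):
# extensions register part generators the same way; ``idx`` controls
# where the step lands in b2partsgenorder:
#
#     @b2partsgenerator('mypart', idx=0)
#     def _pushb2mypart(pushop, bundler):
#         bundler.newpart('output', data='hello from mypart')
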
557 557 def _pushb2ctxcheckheads(pushop, bundler):
558 558 """Generate race condition checking parts
559 559
560 560 Exists as an independent function to aid extensions
561 561 """
562 562 if not pushop.force:
563 563 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
564 564
565 565 @b2partsgenerator('changeset')
566 566 def _pushb2ctx(pushop, bundler):
567 567 """handle changegroup push through bundle2
568 568
569 569 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
570 570 """
571 571 if 'changesets' in pushop.stepsdone:
572 572 return
573 573 pushop.stepsdone.add('changesets')
574 574 # Send known heads to the server for race detection.
575 575 if not _pushcheckoutgoing(pushop):
576 576 return
577 577 pushop.repo.prepushoutgoinghooks(pushop.repo,
578 578 pushop.remote,
579 579 pushop.outgoing)
580 580
581 581 _pushb2ctxcheckheads(pushop, bundler)
582 582
583 583 b2caps = bundle2.bundle2caps(pushop.remote)
584 584 version = None
585 585 cgversions = b2caps.get('changegroup')
586 586 if not cgversions: # 3.1 and 3.2 ship with an empty value
587 587 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
588 588 pushop.outgoing)
589 589 else:
590 590 cgversions = [v for v in cgversions if v in changegroup.packermap]
591 591 if not cgversions:
592 592 raise ValueError(_('no common changegroup version'))
593 593 version = max(cgversions)
594 594 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
595 595 pushop.outgoing,
596 596 version=version)
597 597 cgpart = bundler.newpart('changegroup', data=cg)
598 598 if version is not None:
599 599 cgpart.addparam('version', version)
600 600 def handlereply(op):
601 601 """extract addchangegroup returns from server reply"""
602 602 cgreplies = op.records.getreplies(cgpart.id)
603 603 assert len(cgreplies['changegroup']) == 1
604 604 pushop.cgresult = cgreplies['changegroup'][0]['return']
605 605 return handlereply
606 606
607 607 @b2partsgenerator('phase')
608 608 def _pushb2phases(pushop, bundler):
609 609 """handle phase push through bundle2"""
610 610 if 'phases' in pushop.stepsdone:
611 611 return
612 612 b2caps = bundle2.bundle2caps(pushop.remote)
613 613 if 'pushkey' not in b2caps:
614 614 return
615 615 pushop.stepsdone.add('phases')
616 616 part2node = []
617 617
618 618 def handlefailure(pushop, exc):
619 619 targetid = int(exc.partid)
620 620 for partid, node in part2node:
621 621 if partid == targetid:
622 622 raise error.Abort(_('updating %s to public failed') % node)
623 623
624 624 enc = pushkey.encode
625 625 for newremotehead in pushop.outdatedphases:
626 626 part = bundler.newpart('pushkey')
627 627 part.addparam('namespace', enc('phases'))
628 628 part.addparam('key', enc(newremotehead.hex()))
629 629 part.addparam('old', enc(str(phases.draft)))
630 630 part.addparam('new', enc(str(phases.public)))
631 631 part2node.append((part.id, newremotehead))
632 632 pushop.pkfailcb[part.id] = handlefailure
633 633
634 634 def handlereply(op):
635 635 for partid, node in part2node:
636 636 partrep = op.records.getreplies(partid)
637 637 results = partrep['pushkey']
638 638 assert len(results) <= 1
639 639 msg = None
640 640 if not results:
641 641 msg = _('server ignored update of %s to public!\n') % node
642 642 elif not int(results[0]['return']):
643 643 msg = _('updating %s to public failed!\n') % node
644 644 if msg is not None:
645 645 pushop.ui.warn(msg)
646 646 return handlereply
647 647
648 648 @b2partsgenerator('obsmarkers')
649 649 def _pushb2obsmarkers(pushop, bundler):
650 650 if 'obsmarkers' in pushop.stepsdone:
651 651 return
652 652 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
653 653 if obsolete.commonversion(remoteversions) is None:
654 654 return
655 655 pushop.stepsdone.add('obsmarkers')
656 656 if pushop.outobsmarkers:
657 657 markers = sorted(pushop.outobsmarkers)
658 658 buildobsmarkerspart(bundler, markers)
659 659
660 660 @b2partsgenerator('bookmarks')
661 661 def _pushb2bookmarks(pushop, bundler):
662 662 """handle bookmark push through bundle2"""
663 663 if 'bookmarks' in pushop.stepsdone:
664 664 return
665 665 b2caps = bundle2.bundle2caps(pushop.remote)
666 666 if 'pushkey' not in b2caps:
667 667 return
668 668 pushop.stepsdone.add('bookmarks')
669 669 part2book = []
670 670 enc = pushkey.encode
671 671
672 672 def handlefailure(pushop, exc):
673 673 targetid = int(exc.partid)
674 674 for partid, book, action in part2book:
675 675 if partid == targetid:
676 676 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
677 677 # we should not be called for parts we did not generate
678 678 assert False
679 679
680 680 for book, old, new in pushop.outbookmarks:
681 681 part = bundler.newpart('pushkey')
682 682 part.addparam('namespace', enc('bookmarks'))
683 683 part.addparam('key', enc(book))
684 684 part.addparam('old', enc(old))
685 685 part.addparam('new', enc(new))
686 686 action = 'update'
687 687 if not old:
688 688 action = 'export'
689 689 elif not new:
690 690 action = 'delete'
691 691 part2book.append((part.id, book, action))
692 692 pushop.pkfailcb[part.id] = handlefailure
693 693
694 694 def handlereply(op):
695 695 ui = pushop.ui
696 696 for partid, book, action in part2book:
697 697 partrep = op.records.getreplies(partid)
698 698 results = partrep['pushkey']
699 699 assert len(results) <= 1
700 700 if not results:
701 701 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
702 702 else:
703 703 ret = int(results[0]['return'])
704 704 if ret:
705 705 ui.status(bookmsgmap[action][0] % book)
706 706 else:
707 707 ui.warn(bookmsgmap[action][1] % book)
708 708 if pushop.bkresult is not None:
709 709 pushop.bkresult = 1
710 710 return handlereply
711 711
712 712
713 713 def _pushbundle2(pushop):
714 714 """push data to the remote using bundle2
715 715
716 716 The only currently supported type of data is changegroup but this will
717 717 evolve in the future."""
718 718 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
719 719 pushback = (pushop.trmanager
720 720 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
721 721
722 722 # create reply capability
723 723 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
724 724 allowpushback=pushback))
725 725 bundler.newpart('replycaps', data=capsblob)
726 726 replyhandlers = []
727 727 for partgenname in b2partsgenorder:
728 728 partgen = b2partsgenmapping[partgenname]
729 729 ret = partgen(pushop, bundler)
730 730 if callable(ret):
731 731 replyhandlers.append(ret)
732 732 # do not push if nothing to push
733 733 if bundler.nbparts <= 1:
734 734 return
735 735 stream = util.chunkbuffer(bundler.getchunks())
736 736 try:
737 737 try:
738 738 reply = pushop.remote.unbundle(stream, ['force'], 'push')
739 739 except error.BundleValueError as exc:
740 740 raise error.Abort('missing support for %s' % exc)
741 741 try:
742 742 trgetter = None
743 743 if pushback:
744 744 trgetter = pushop.trmanager.transaction
745 745 op = bundle2.processbundle(pushop.repo, reply, trgetter)
746 746 except error.BundleValueError as exc:
747 747 raise error.Abort('missing support for %s' % exc)
748 748 except error.PushkeyFailed as exc:
749 749 partid = int(exc.partid)
750 750 if partid not in pushop.pkfailcb:
751 751 raise
752 752 pushop.pkfailcb[partid](pushop, exc)
753 753 for rephand in replyhandlers:
754 754 rephand(op)
755 755
756 756 def _pushchangeset(pushop):
757 757 """Make the actual push of changeset bundle to remote repo"""
758 758 if 'changesets' in pushop.stepsdone:
759 759 return
760 760 pushop.stepsdone.add('changesets')
761 761 if not _pushcheckoutgoing(pushop):
762 762 return
763 763 pushop.repo.prepushoutgoinghooks(pushop.repo,
764 764 pushop.remote,
765 765 pushop.outgoing)
766 766 outgoing = pushop.outgoing
767 767 unbundle = pushop.remote.capable('unbundle')
768 768 # TODO: get bundlecaps from remote
769 769 bundlecaps = None
770 770 # create a changegroup from local
771 771 if pushop.revs is None and not (outgoing.excluded
772 772 or pushop.repo.changelog.filteredrevs):
773 773 # push everything,
774 774 # use the fast path, no race possible on push
775 775 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
776 776 cg = changegroup.getsubset(pushop.repo,
777 777 outgoing,
778 778 bundler,
779 779 'push',
780 780 fastpath=True)
781 781 else:
782 782 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
783 783 bundlecaps)
784 784
785 785 # apply changegroup to remote
786 786 if unbundle:
787 787 # local repo finds heads on server, finds out what
788 788 # revs it must push. once revs transferred, if server
789 789 # finds it has different heads (someone else won
790 790 # commit/push race), server aborts.
791 791 if pushop.force:
792 792 remoteheads = ['force']
793 793 else:
794 794 remoteheads = pushop.remoteheads
795 795 # ssh: return remote's addchangegroup()
796 796 # http: return remote's addchangegroup() or 0 for error
797 797 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
798 798 pushop.repo.url())
799 799 else:
800 800 # we return an integer indicating remote head count
801 801 # change
802 802 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
803 803 pushop.repo.url())
804 804
805 805 def _pushsyncphase(pushop):
806 806 """synchronise phase information locally and remotely"""
807 807 cheads = pushop.commonheads
808 808 # even when we don't push, exchanging phase data is useful
809 809 remotephases = pushop.remote.listkeys('phases')
810 810 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
811 811 and remotephases # server supports phases
812 812 and pushop.cgresult is None # nothing was pushed
813 813 and remotephases.get('publishing', False)):
814 814 # When:
815 815 # - this is a subrepo push
816 816 # - and the remote supports phases
817 817 # - and no changesets were pushed
818 818 # - and the remote is publishing
819 819 # We may be in the issue 3871 case!
820 820 # We drop the possible phase synchronisation done as a
821 821 # courtesy to publish changesets that are possibly draft
822 822 # locally on the remote.
823 823 remotephases = {'publishing': 'True'}
824 824 if not remotephases: # old server or public-only reply from non-publishing
825 825 _localphasemove(pushop, cheads)
826 826 # don't push any phase data as there is nothing to push
827 827 else:
828 828 ana = phases.analyzeremotephases(pushop.repo, cheads,
829 829 remotephases)
830 830 pheads, droots = ana
831 831 ### Apply remote phase on local
832 832 if remotephases.get('publishing', False):
833 833 _localphasemove(pushop, cheads)
834 834 else: # publish = False
835 835 _localphasemove(pushop, pheads)
836 836 _localphasemove(pushop, cheads, phases.draft)
837 837 ### Apply local phase on remote
838 838
839 839 if pushop.cgresult:
840 840 if 'phases' in pushop.stepsdone:
841 841 # phases already pushed through bundle2
842 842 return
843 843 outdated = pushop.outdatedphases
844 844 else:
845 845 outdated = pushop.fallbackoutdatedphases
846 846
847 847 pushop.stepsdone.add('phases')
848 848
849 849 # filter heads already turned public by the push
850 850 outdated = [c for c in outdated if c.node() not in pheads]
851 851 # fallback to independent pushkey command
852 852 for newremotehead in outdated:
853 853 r = pushop.remote.pushkey('phases',
854 854 newremotehead.hex(),
855 855 str(phases.draft),
856 856 str(phases.public))
857 857 if not r:
858 858 pushop.ui.warn(_('updating %s to public failed!\n')
859 859 % newremotehead)
860 860
861 861 def _localphasemove(pushop, nodes, phase=phases.public):
862 862 """move <nodes> to <phase> in the local source repo"""
863 863 if pushop.trmanager:
864 864 phases.advanceboundary(pushop.repo,
865 865 pushop.trmanager.transaction(),
866 866 phase,
867 867 nodes)
868 868 else:
869 869 # repo is not locked, do not change any phases!
870 870 # Inform the user that phases should have been moved when
871 871 # applicable.
872 872 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
873 873 phasestr = phases.phasenames[phase]
874 874 if actualmoves:
875 875 pushop.ui.status(_('cannot lock source repo, skipping '
876 876 'local %s phase update\n') % phasestr)
877 877
878 878 def _pushobsolete(pushop):
879 879 """utility function to push obsolete markers to a remote"""
880 880 if 'obsmarkers' in pushop.stepsdone:
881 881 return
882 882 repo = pushop.repo
883 883 remote = pushop.remote
884 884 pushop.stepsdone.add('obsmarkers')
885 885 if pushop.outobsmarkers:
886 886 pushop.ui.debug('try to push obsolete markers to remote\n')
887 887 rslts = []
888 888 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
889 889 for key in sorted(remotedata, reverse=True):
890 890 # reverse sort to ensure we end with dump0
891 891 data = remotedata[key]
892 892 rslts.append(remote.pushkey('obsolete', key, '', data))
893 893 if [r for r in rslts if not r]:
894 894 msg = _('failed to push some obsolete markers!\n')
895 895 repo.ui.warn(msg)
896 896
897 897 def _pushbookmark(pushop):
898 898 """Update bookmark position on remote"""
899 899 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
900 900 return
901 901 pushop.stepsdone.add('bookmarks')
902 902 ui = pushop.ui
903 903 remote = pushop.remote
904 904
905 905 for b, old, new in pushop.outbookmarks:
906 906 action = 'update'
907 907 if not old:
908 908 action = 'export'
909 909 elif not new:
910 910 action = 'delete'
911 911 if remote.pushkey('bookmarks', b, old, new):
912 912 ui.status(bookmsgmap[action][0] % b)
913 913 else:
914 914 ui.warn(bookmsgmap[action][1] % b)
915 915 # discovery can have set the value from an invalid entry
916 916 if pushop.bkresult is not None:
917 917 pushop.bkresult = 1
918 918
919 919 class pulloperation(object):
920 920 """A object that represent a single pull operation
921 921
922 922 It purpose is to carry pull related state and very common operation.
923 923
924 924 A new should be created at the beginning of each pull and discarded
925 925 afterward.
926 926 """
927 927
928 928 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
929 929 remotebookmarks=None, streamclonerequested=None):
930 930 # repo we pull into
931 931 self.repo = repo
932 932 # repo we pull from
933 933 self.remote = remote
934 934 # revisions we try to pull (None is "all")
935 935 self.heads = heads
936 936 # bookmarks pulled explicitly
937 937 self.explicitbookmarks = bookmarks
938 938 # do we force pull?
939 939 self.force = force
940 940 # whether a streaming clone was requested
941 941 self.streamclonerequested = streamclonerequested
942 942 # transaction manager
943 943 self.trmanager = None
944 944 # set of common changesets between local and remote before the pull
945 945 self.common = None
946 946 # set of pulled heads
947 947 self.rheads = None
948 948 # list of missing changesets to fetch remotely
949 949 self.fetch = None
950 950 # remote bookmarks data
951 951 self.remotebookmarks = remotebookmarks
952 952 # result of changegroup pulling (used as return code by pull)
953 953 self.cgresult = None
954 954 # list of steps already done
955 955 self.stepsdone = set()
956 956
957 957 @util.propertycache
958 958 def pulledsubset(self):
959 959 """heads of the set of changeset target by the pull"""
960 960 # compute target subset
961 961 if self.heads is None:
962 962 # We pulled everything possible
963 963 # sync on everything common
964 964 c = set(self.common)
965 965 ret = list(self.common)
966 966 for n in self.rheads:
967 967 if n not in c:
968 968 ret.append(n)
969 969 return ret
970 970 else:
971 971 # We pulled a specific subset
972 972 # sync on this subset
973 973 return self.heads
974 974
975 975 @util.propertycache
976 976 def canusebundle2(self):
977 977 return _canusebundle2(self)
978 978
979 979 @util.propertycache
980 980 def remotebundle2caps(self):
981 981 return bundle2.bundle2caps(self.remote)
982 982
983 983 def gettransaction(self):
984 984 # deprecated; talk to trmanager directly
985 985 return self.trmanager.transaction()
986 986
987 987 class transactionmanager(object):
988 988 """An object to manage the life cycle of a transaction
989 989
990 990 It creates the transaction on demand and calls the appropriate hooks when
991 991 closing the transaction."""
992 992 def __init__(self, repo, source, url):
993 993 self.repo = repo
994 994 self.source = source
995 995 self.url = url
996 996 self._tr = None
997 997
998 998 def transaction(self):
999 999 """Return an open transaction object, constructing if necessary"""
1000 1000 if not self._tr:
1001 1001 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1002 1002 self._tr = self.repo.transaction(trname)
1003 1003 self._tr.hookargs['source'] = self.source
1004 1004 self._tr.hookargs['url'] = self.url
1005 1005 return self._tr
1006 1006
1007 1007 def close(self):
1008 1008 """close transaction if created"""
1009 1009 if self._tr is not None:
1010 1010 self._tr.close()
1011 1011
1012 1012 def release(self):
1013 1013 """release transaction if created"""
1014 1014 if self._tr is not None:
1015 1015 self._tr.release()
1016 1016
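# Example (illustrative sketch): transactionmanager follows a lazy
# create / close-on-success / always-release pattern, mirroring its use
# in pull() below:
#
#     trmanager = transactionmanager(repo, 'pull', remote.url())
#     try:
#         tr = trmanager.transaction()  # opened on first use
#         # ... apply incoming data under tr ...
#         trmanager.close()
#     finally:
#         trmanager.release()
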
1017 1017 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1018 1018 streamclonerequested=None):
1019 1019 """Fetch repository data from a remote.
1020 1020
1021 1021 This is the main function used to retrieve data from a remote repository.
1022 1022
1023 1023 ``repo`` is the local repository to clone into.
1024 1024 ``remote`` is a peer instance.
1025 1025 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1026 1026 default) means to pull everything from the remote.
1027 1027 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1028 1028 default, all remote bookmarks are pulled.
1029 1029 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1030 1030 initialization.
1031 1031 ``streamclonerequested`` is a boolean indicating whether a "streaming
1032 1032 clone" is requested. A "streaming clone" is essentially a raw file copy
1033 1033 of revlogs from the server. This only works when the local repository is
1034 1034 empty. The default value of ``None`` means to respect the server
1035 1035 configuration for preferring stream clones.
1036 1036
1037 1037 Returns the ``pulloperation`` created for this pull.
1038 1038 """
1039 1039 if opargs is None:
1040 1040 opargs = {}
1041 1041 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1042 1042 streamclonerequested=streamclonerequested, **opargs)
1043 1043 if pullop.remote.local():
1044 1044 missing = set(pullop.remote.requirements) - pullop.repo.supported
1045 1045 if missing:
1046 1046 msg = _("required features are not"
1047 1047 " supported in the destination:"
1048 1048 " %s") % (', '.join(sorted(missing)))
1049 1049 raise error.Abort(msg)
1050 1050
1051 1051 lock = pullop.repo.lock()
1052 1052 try:
1053 1053 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1054 1054 streamclone.maybeperformlegacystreamclone(pullop)
1055 1055 # This should ideally be in _pullbundle2(). However, it needs to run
1056 1056 # before discovery to avoid extra work.
1057 1057 _maybeapplyclonebundle(pullop)
1058 1058 _pulldiscovery(pullop)
1059 1059 if pullop.canusebundle2:
1060 1060 _pullbundle2(pullop)
1061 1061 _pullchangeset(pullop)
1062 1062 _pullphase(pullop)
1063 1063 _pullbookmarks(pullop)
1064 1064 _pullobsolete(pullop)
1065 1065 pullop.trmanager.close()
1066 1066 finally:
1067 1067 pullop.trmanager.release()
1068 1068 lock.release()
1069 1069
1070 1070 return pullop
1071 1071
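# Example (illustrative sketch; ``node`` is a hypothetical remote
# changeset): pulling a specific head and reading the result code:
#
#     pullop = pull(repo, remote, heads=[node])
#     if pullop.cgresult == 0:
#         repo.ui.status('no changes found\n')
#
# Passing heads=None pulls everything the remote has.
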
1072 1072 # list of steps to perform discovery before pull
1073 1073 pulldiscoveryorder = []
1074 1074
1075 1075 # Mapping between step name and function
1076 1076 #
1077 1077 # This exists to help extensions wrap steps if necessary
1078 1078 pulldiscoverymapping = {}
1079 1079
1080 1080 def pulldiscovery(stepname):
1081 1081 """decorator for function performing discovery before pull
1082 1082
1083 1083 The function is added to the step -> function mapping and appended to the
1084 1084 list of steps. Beware that decorated function will be added in order (this
1085 1085 may matter).
1086 1086
1087 1087 You can only use this decorator for a new step, if you want to wrap a step
1088 1088 from an extension, change the pulldiscovery dictionary directly."""
1089 1089 def dec(func):
1090 1090 assert stepname not in pulldiscoverymapping
1091 1091 pulldiscoverymapping[stepname] = func
1092 1092 pulldiscoveryorder.append(stepname)
1093 1093 return func
1094 1094 return dec
1095 1095
1096 1096 def _pulldiscovery(pullop):
1097 1097 """Run all discovery steps"""
1098 1098 for stepname in pulldiscoveryorder:
1099 1099 step = pulldiscoverymapping[stepname]
1100 1100 step(pullop)
1101 1101
1102 1102 @pulldiscovery('b1:bookmarks')
1103 1103 def _pullbookmarkbundle1(pullop):
1104 1104 """fetch bookmark data in bundle1 case
1105 1105
1106 1106 If not using bundle2, we have to fetch bookmarks before changeset
1107 1107 discovery to reduce the chance and impact of race conditions."""
1108 1108 if pullop.remotebookmarks is not None:
1109 1109 return
1110 1110 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1111 1111 # all known bundle2 servers now support listkeys, but let's be nice to
1112 1112 # new implementations.
1113 1113 return
1114 1114 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1115 1115
1116 1116
1117 1117 @pulldiscovery('changegroup')
1118 1118 def _pulldiscoverychangegroup(pullop):
1119 1119 """discovery phase for the pull
1120 1120
1121 1121 Currently handles changeset discovery only; will change to handle all
1122 1122 discovery at some point."""
1123 1123 tmp = discovery.findcommonincoming(pullop.repo,
1124 1124 pullop.remote,
1125 1125 heads=pullop.heads,
1126 1126 force=pullop.force)
1127 1127 common, fetch, rheads = tmp
1128 1128 nm = pullop.repo.unfiltered().changelog.nodemap
1129 1129 if fetch and rheads:
1130 1130 # If a remote head is filtered locally, let's drop it from the unknown
1131 1131 # remote heads and put it back in common.
1132 1132 #
1133 1133 # This is a hackish solution to catch most "common but locally
1134 1134 # hidden" situations. We do not perform discovery on the unfiltered
1135 1135 # repository because it ends up doing a pathological number of round
1136 1136 # trips for a huge amount of changesets we do not care about.
1137 1137 #
1138 1138 # If a set of such "common but filtered" changesets exists on the server
1139 1139 # but does not include a remote head, we'll not be able to detect it.
1140 1140 scommon = set(common)
1141 1141 filteredrheads = []
1142 1142 for n in rheads:
1143 1143 if n in nm:
1144 1144 if n not in scommon:
1145 1145 common.append(n)
1146 1146 else:
1147 1147 filteredrheads.append(n)
1148 1148 if not filteredrheads:
1149 1149 fetch = []
1150 1150 rheads = filteredrheads
1151 1151 pullop.common = common
1152 1152 pullop.fetch = fetch
1153 1153 pullop.rheads = rheads
1154 1154
1155 1155 def _pullbundle2(pullop):
1156 1156 """pull data using bundle2
1157 1157
1158 1158 For now, the only supported data is the changegroup."""
1159 1159 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1160 1160
1161 1161 streaming, streamreqs = streamclone.canperformstreamclone(pullop)
1162 1162
1163 1163 # pulling changegroup
1164 1164 pullop.stepsdone.add('changegroup')
1165 1165
1166 1166 kwargs['common'] = pullop.common
1167 1167 kwargs['heads'] = pullop.heads or pullop.rheads
1168 1168 kwargs['cg'] = pullop.fetch
1169 1169 if 'listkeys' in pullop.remotebundle2caps:
1170 1170 kwargs['listkeys'] = ['phase']
1171 1171 if pullop.remotebookmarks is None:
1172 1172 # make sure to always include bookmark data when migrating
1173 1173 # `hg incoming --bundle` to using this function.
1174 1174 kwargs['listkeys'].append('bookmarks')
1175 1175 if streaming:
1176 1176 pullop.repo.ui.status(_('streaming all changes\n'))
1177 1177 elif not pullop.fetch:
1178 1178 pullop.repo.ui.status(_("no changes found\n"))
1179 1179 pullop.cgresult = 0
1180 1180 else:
1181 1181 if pullop.heads is None and list(pullop.common) == [nullid]:
1182 1182 pullop.repo.ui.status(_("requesting all changes\n"))
1183 1183 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1184 1184 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1185 1185 if obsolete.commonversion(remoteversions) is not None:
1186 1186 kwargs['obsmarkers'] = True
1187 1187 pullop.stepsdone.add('obsmarkers')
1188 1188 _pullbundle2extraprepare(pullop, kwargs)
1189 1189 bundle = pullop.remote.getbundle('pull', **kwargs)
1190 1190 try:
1191 1191 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1192 1192 except error.BundleValueError as exc:
1193 1193 raise error.Abort('missing support for %s' % exc)
1194 1194
1195 1195 if pullop.fetch:
1196 1196 results = [cg['return'] for cg in op.records['changegroup']]
1197 1197 pullop.cgresult = changegroup.combineresults(results)
1198 1198
1199 1199 # processing phase changes
1200 1200 for namespace, value in op.records['listkeys']:
1201 1201 if namespace == 'phases':
1202 1202 _pullapplyphases(pullop, value)
1203 1203
1204 1204 # processing bookmark updates
1205 1205 for namespace, value in op.records['listkeys']:
1206 1206 if namespace == 'bookmarks':
1207 1207 pullop.remotebookmarks = value
1208 1208
1209 1209 # bookmark data were either already there or pulled in the bundle
1210 1210 if pullop.remotebookmarks is not None:
1211 1211 _pullbookmarks(pullop)
1212 1212
1213 1213 def _pullbundle2extraprepare(pullop, kwargs):
1214 1214 """hook function so that extensions can extend the getbundle call"""
1215 1215 pass
1216 1216
1217 1217 def _pullchangeset(pullop):
1218 1218 """pull changeset from unbundle into the local repo"""
1219 1219 # We delay opening the transaction as late as possible so we
1220 1220 # don't open a transaction for nothing and don't break future useful
1221 1221 # rollback calls
1222 1222 if 'changegroup' in pullop.stepsdone:
1223 1223 return
1224 1224 pullop.stepsdone.add('changegroup')
1225 1225 if not pullop.fetch:
1226 1226 pullop.repo.ui.status(_("no changes found\n"))
1227 1227 pullop.cgresult = 0
1228 1228 return
1229 1229 pullop.gettransaction()
1230 1230 if pullop.heads is None and list(pullop.common) == [nullid]:
1231 1231 pullop.repo.ui.status(_("requesting all changes\n"))
1232 1232 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1233 1233 # issue1320, avoid a race if remote changed after discovery
1234 1234 pullop.heads = pullop.rheads
1235 1235
1236 1236 if pullop.remote.capable('getbundle'):
1237 1237 # TODO: get bundlecaps from remote
1238 1238 cg = pullop.remote.getbundle('pull', common=pullop.common,
1239 1239 heads=pullop.heads or pullop.rheads)
1240 1240 elif pullop.heads is None:
1241 1241 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1242 1242 elif not pullop.remote.capable('changegroupsubset'):
1243 1243 raise error.Abort(_("partial pull cannot be done because "
1244 1244 "other repository doesn't support "
1245 1245 "changegroupsubset."))
1246 1246 else:
1247 1247 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1248 1248 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1249 1249 pullop.remote.url())
1250 1250
1251 1251 def _pullphase(pullop):
1252 1252 # Get remote phases data from remote
1253 1253 if 'phases' in pullop.stepsdone:
1254 1254 return
1255 1255 remotephases = pullop.remote.listkeys('phases')
1256 1256 _pullapplyphases(pullop, remotephases)
1257 1257
1258 1258 def _pullapplyphases(pullop, remotephases):
1259 1259 """apply phase movement from observed remote state"""
1260 1260 if 'phases' in pullop.stepsdone:
1261 1261 return
1262 1262 pullop.stepsdone.add('phases')
1263 1263 publishing = bool(remotephases.get('publishing', False))
1264 1264 if remotephases and not publishing:
1265 1265 # remote is new and non-publishing
1266 1266 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1267 1267 pullop.pulledsubset,
1268 1268 remotephases)
1269 1269 dheads = pullop.pulledsubset
1270 1270 else:
1271 1271 # Remote is old or publishing; all common changesets
1272 1272 # should be seen as public
1273 1273 pheads = pullop.pulledsubset
1274 1274 dheads = []
1275 1275 unfi = pullop.repo.unfiltered()
1276 1276 phase = unfi._phasecache.phase
1277 1277 rev = unfi.changelog.nodemap.get
1278 1278 public = phases.public
1279 1279 draft = phases.draft
1280 1280
1281 1281 # exclude changesets already public locally and update the others
1282 1282 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1283 1283 if pheads:
1284 1284 tr = pullop.gettransaction()
1285 1285 phases.advanceboundary(pullop.repo, tr, public, pheads)
1286 1286
1287 1287 # exclude changesets already draft locally and update the others
1288 1288 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1289 1289 if dheads:
1290 1290 tr = pullop.gettransaction()
1291 1291 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1292 1292
1293 1293 def _pullbookmarks(pullop):
1294 1294 """process the remote bookmark information to update the local one"""
1295 1295 if 'bookmarks' in pullop.stepsdone:
1296 1296 return
1297 1297 pullop.stepsdone.add('bookmarks')
1298 1298 repo = pullop.repo
1299 1299 remotebookmarks = pullop.remotebookmarks
1300 1300 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1301 1301 pullop.remote.url(),
1302 1302 pullop.gettransaction,
1303 1303 explicit=pullop.explicitbookmarks)
1304 1304
1305 1305 def _pullobsolete(pullop):
1306 1306 """utility function to pull obsolete markers from a remote
1307 1307
1308 1308 `gettransaction` is a function that returns the pull transaction, creating
1309 1309 one if necessary. We return the transaction to inform the calling code that
1310 1310 a new transaction has been created (when applicable).
1311 1311
1312 1312 Exists mostly to allow overriding for experimentation purposes"""
1313 1313 if 'obsmarkers' in pullop.stepsdone:
1314 1314 return
1315 1315 pullop.stepsdone.add('obsmarkers')
1316 1316 tr = None
1317 1317 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1318 1318 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1319 1319 remoteobs = pullop.remote.listkeys('obsolete')
1320 1320 if 'dump0' in remoteobs:
1321 1321 tr = pullop.gettransaction()
1322 1322 for key in sorted(remoteobs, reverse=True):
1323 1323 if key.startswith('dump'):
1324 1324 data = base85.b85decode(remoteobs[key])
1325 1325 pullop.repo.obsstore.mergemarkers(tr, data)
1326 1326 pullop.repo.invalidatevolatilesets()
1327 1327 return tr
1328 1328
1329 1329 def caps20to10(repo):
1330 1330 """return a set with appropriate options to use bundle20 during getbundle"""
1331 1331 caps = set(['HG20'])
1332 1332 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1333 1333 caps.add('bundle2=' + urllib.quote(capsblob))
1334 1334 return caps
1335 1335
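# Example (illustrative sketch): the returned set carries the HG20 marker
# plus the URL-quoted bundle2 capability blob, roughly:
#
#     set(['HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'])
#
# The exact blob depends on the repository's capabilities.
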
1336 1336 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1337 1337 getbundle2partsorder = []
1338 1338
1339 1339 # Mapping between step name and function
1340 1340 #
1341 1341 # This exists to help extensions wrap steps if necessary
1342 1342 getbundle2partsmapping = {}
1343 1343
1344 1344 def getbundle2partsgenerator(stepname, idx=None):
1345 1345 """decorator for function generating bundle2 part for getbundle
1346 1346
1347 1347 The function is added to the step -> function mapping and appended to the
1348 1348 list of steps. Beware that decorated functions will be added in order
1349 1349 (this may matter).
1350 1350
1351 1351 You can only use this decorator for new steps, if you want to wrap a step
1352 1352 from an extension, attack the getbundle2partsmapping dictionary directly."""
1353 1353 def dec(func):
1354 1354 assert stepname not in getbundle2partsmapping
1355 1355 getbundle2partsmapping[stepname] = func
1356 1356 if idx is None:
1357 1357 getbundle2partsorder.append(stepname)
1358 1358 else:
1359 1359 getbundle2partsorder.insert(idx, stepname)
1360 1360 return func
1361 1361 return dec
1362 1362
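# Example (illustrative sketch; 'mypart' is a hypothetical step name):
# server-side getbundle parts are registered like the push-side
# generators above:
#
#     @getbundle2partsgenerator('mypart')
#     def _getbundlemypart(bundler, repo, source, bundlecaps=None,
#                          b2caps=None, **kwargs):
#         bundler.newpart('output', data='served by mypart')
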
1363 1363 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1364 1364 **kwargs):
1365 1365 """return a full bundle (with potentially multiple kind of parts)
1366 1366
1367 1367 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1368 1368 passed. For now, the bundle can contain only changegroup, but this will
1369 1369 changes when more part type will be available for bundle2.
1370 1370
1371 1371 This is different from changegroup.getchangegroup that only returns an HG10
1372 1372 changegroup bundle. They may eventually get reunited in the future when we
1373 1373 have a clearer idea of the API we what to query different data.
1374 1374
1375 1375 The implementation is at a very early stage and will get massive rework
1376 1376 when the API of bundle is refined.
1377 1377 """
1378 1378 # bundle10 case
1379 1379 usebundle2 = False
1380 1380 if bundlecaps is not None:
1381 1381 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1382 1382 if not usebundle2:
1383 1383 if bundlecaps and not kwargs.get('cg', True):
1384 1384 raise ValueError(_('request for bundle10 must include changegroup'))
1385 1385
1386 1386 if kwargs:
1387 1387 raise ValueError(_('unsupported getbundle arguments: %s')
1388 1388 % ', '.join(sorted(kwargs.keys())))
1389 1389 return changegroup.getchangegroup(repo, source, heads=heads,
1390 1390 common=common, bundlecaps=bundlecaps)
1391 1391
1392 1392 # bundle20 case
1393 1393 b2caps = {}
1394 1394 for bcaps in bundlecaps:
1395 1395 if bcaps.startswith('bundle2='):
1396 1396 blob = urllib.unquote(bcaps[len('bundle2='):])
1397 1397 b2caps.update(bundle2.decodecaps(blob))
1398 1398 bundler = bundle2.bundle20(repo.ui, b2caps)
1399 1399
1400 1400 kwargs['heads'] = heads
1401 1401 kwargs['common'] = common
1402 1402
1403 1403 for name in getbundle2partsorder:
1404 1404 func = getbundle2partsmapping[name]
1405 1405 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1406 1406 **kwargs)
1407 1407
1408 1408 return util.chunkbuffer(bundler.getchunks())
1409 1409
1410 1410 @getbundle2partsgenerator('changegroup')
1411 1411 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1412 1412 b2caps=None, heads=None, common=None, **kwargs):
1413 1413 """add a changegroup part to the requested bundle"""
1414 1414 cg = None
1415 1415 if kwargs.get('cg', True):
1416 1416 # build changegroup bundle here.
1417 1417 version = None
1418 1418 cgversions = b2caps.get('changegroup')
1419 1419 getcgkwargs = {}
1420 1420 if cgversions: # 3.1 and 3.2 ship with an empty value
1421 1421 cgversions = [v for v in cgversions if v in changegroup.packermap]
1422 1422 if not cgversions:
1423 1423 raise ValueError(_('no common changegroup version'))
1424 1424 version = getcgkwargs['version'] = max(cgversions)
1425 1425 outgoing = changegroup.computeoutgoing(repo, heads, common)
1426 1426 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1427 1427 bundlecaps=bundlecaps,
1428 1428 **getcgkwargs)
1429 1429
1430 1430 if cg:
1431 1431 part = bundler.newpart('changegroup', data=cg)
1432 1432 if version is not None:
1433 1433 part.addparam('version', version)
1434 1434 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1435 1435
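Two illustrative outcomes of that negotiation, assuming a server whose packermap knows '01' and '02':

    # A modern client advertises changegroup=01,02 and the newest common
    # version wins:
    assert max(['01', '02']) == '02'
    # 3.1/3.2-era clients send an empty 'changegroup' value, so cgversions
    # is falsy and the part is emitted without a 'version' parameter.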
1436 1436 @getbundle2partsgenerator('listkeys')
1437 1437 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1438 1438 b2caps=None, **kwargs):
1439 1439 """add parts containing listkeys namespaces to the requested bundle"""
1440 1440 listkeys = kwargs.get('listkeys', ())
1441 1441 for namespace in listkeys:
1442 1442 part = bundler.newpart('listkeys')
1443 1443 part.addparam('namespace', namespace)
1444 1444 keys = repo.listkeys(namespace).items()
1445 1445 part.data = pushkey.encodekeys(keys)
1446 1446
1447 1447 @getbundle2partsgenerator('obsmarkers')
1448 1448 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1449 1449 b2caps=None, heads=None, **kwargs):
1450 1450 """add an obsolescence markers part to the requested bundle"""
1451 1451 if kwargs.get('obsmarkers', False):
1452 1452 if heads is None:
1453 1453 heads = repo.heads()
1454 1454 subset = [c.node() for c in repo.set('::%ln', heads)]
1455 1455 markers = repo.obsstore.relevantmarkers(subset)
1456 1456 markers = sorted(markers)
1457 1457 buildobsmarkerspart(bundler, markers)
1458 1458
1459 1459 @getbundle2partsgenerator('hgtagsfnodes')
1460 1460 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1461 1461 b2caps=None, heads=None, common=None,
1462 1462 **kwargs):
1463 1463 """Transfer the .hgtags filenodes mapping.
1464 1464
1465 1465 Only values for heads in this bundle will be transferred.
1466 1466
1467 1467 The part data consists of pairs of a 20 byte changeset node and the raw
1468 1468 .hgtags filenode value.
1469 1469 """
1470 1470 # Don't send unless:
1471 1471 # - changesets are being exchanged,
1472 1472 # - the client supports it.
1473 1473 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1474 1474 return
1475 1475
1476 1476 outgoing = changegroup.computeoutgoing(repo, heads, common)
1477 1477
1478 1478 if not outgoing.missingheads:
1479 1479 return
1480 1480
1481 1481 cache = tags.hgtagsfnodescache(repo.unfiltered())
1482 1482 chunks = []
1483 1483
1484 1484 # .hgtags fnodes are only relevant for head changesets. While we could
1485 1485 # transfer values for all known nodes, there will likely be little to
1486 1486 # no benefit.
1487 1487 #
1488 1488 # We don't bother using a generator to produce output data because
1489 1489 # a) we only have 40 bytes per head and even esoteric numbers of heads
1490 1490 # consume little memory (1M heads is 40MB) b) we don't want to send the
1491 1491 # part if we don't have entries and knowing if we have entries requires
1492 1492 # cache lookups.
1493 1493 for node in outgoing.missingheads:
1494 1494 # Don't compute missing, as this may slow down serving.
1495 1495 fnode = cache.getfnode(node, computemissing=False)
1496 1496 if fnode is not None:
1497 1497 chunks.extend([node, fnode])
1498 1498
1499 1499 if chunks:
1500 1500 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1501 1501
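On the receiving side, the payload is just that flat sequence of fixed-width records; a minimal decoding sketch (the helper name decodehgtagsfnodes is hypothetical):

    def decodehgtagsfnodes(data):
        """Decode an hgtagsfnodes payload into {changeset node: fnode}."""
        assert len(data) % 40 == 0
        mapping = {}
        for offset in range(0, len(data), 40):
            node = data[offset:offset + 20]                # changeset node
            mapping[node] = data[offset + 20:offset + 40]  # .hgtags filenode
        return mapping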
1502 1502 def check_heads(repo, their_heads, context):
1503 1503 """check if the heads of a repo have been modified
1504 1504
1505 1505 Used by peer for unbundling.
1506 1506 """
1507 1507 heads = repo.heads()
1508 1508 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1509 1509 if not (their_heads == ['force'] or their_heads == heads or
1510 1510 their_heads == ['hashed', heads_hash]):
1511 1511 # someone else committed/pushed/unbundled while we
1512 1512 # were transferring data
1513 1513 raise error.PushRaced('repository changed while %s - '
1514 1514 'please try again' % context)
1515 1515
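A client can precompute the race-detection argument the same way; a minimal sketch using hashlib directly, where check_heads itself goes through util.sha1:

    import hashlib

    def hashedheads(heads):
        """Build the ['hashed', digest] argument accepted by unbundle."""
        # Same construction as check_heads: sha1 over the sorted binary
        # head nodes, concatenated.
        return ['hashed', hashlib.sha1(''.join(sorted(heads))).digest()]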
1516 1516 def unbundle(repo, cg, heads, source, url):
1517 1517 """Apply a bundle to a repo.
1518 1518
1519 1519 This function makes sure the repo is locked during application and has a
1520 1520 mechanism to check that no push race occurred between the creation of the
1521 1521 bundle and its application.
1522 1522
1523 1523 If the push was raced, a PushRaced exception is raised."""
1524 1524 r = 0
1525 1525 # need a transaction when processing a bundle2 stream
1526 1526 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1527 1527 lockandtr = [None, None, None]
1528 1528 recordout = None
1529 1529 # quick fix for output mismatch with bundle2 in 3.4
1530 1530 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1531 1531 False)
1532 1532 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1533 1533 captureoutput = True
1534 1534 try:
1535 1535 check_heads(repo, heads, 'uploading changes')
1536 1536 # push can proceed
1537 1537 if util.safehasattr(cg, 'params'):
1538 1538 r = None
1539 1539 try:
1540 1540 def gettransaction():
1541 1541 if not lockandtr[2]:
1542 1542 lockandtr[0] = repo.wlock()
1543 1543 lockandtr[1] = repo.lock()
1544 1544 lockandtr[2] = repo.transaction(source)
1545 1545 lockandtr[2].hookargs['source'] = source
1546 1546 lockandtr[2].hookargs['url'] = url
1547 1547 lockandtr[2].hookargs['bundle2'] = '1'
1548 1548 return lockandtr[2]
1549 1549
1550 1550 # Do greedy locking by default until we're satisfied with lazy
1551 1551 # locking.
1552 1552 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1553 1553 gettransaction()
1554 1554
1555 1555 op = bundle2.bundleoperation(repo, gettransaction,
1556 1556 captureoutput=captureoutput)
1557 1557 try:
1558 1558 op = bundle2.processbundle(repo, cg, op=op)
1559 1559 finally:
1560 1560 r = op.reply
1561 1561 if captureoutput and r is not None:
1562 1562 repo.ui.pushbuffer(error=True, subproc=True)
1563 1563 def recordout(output):
1564 1564 r.newpart('output', data=output, mandatory=False)
1565 1565 if lockandtr[2] is not None:
1566 1566 lockandtr[2].close()
1567 1567 except BaseException as exc:
1568 1568 exc.duringunbundle2 = True
1569 1569 if captureoutput and r is not None:
1570 1570 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1571 1571 def recordout(output):
1572 1572 part = bundle2.bundlepart('output', data=output,
1573 1573 mandatory=False)
1574 1574 parts.append(part)
1575 1575 raise
1576 1576 else:
1577 1577 lockandtr[1] = repo.lock()
1578 1578 r = changegroup.addchangegroup(repo, cg, source, url)
1579 1579 finally:
1580 1580 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1581 1581 if recordout is not None:
1582 1582 recordout(repo.ui.popbuffer())
1583 1583 return r
1584 1584
1585 1585 def _maybeapplyclonebundle(pullop):
1586 1586 """Apply a clone bundle from a remote, if possible."""
1587 1587
1588 1588 repo = pullop.repo
1589 1589 remote = pullop.remote
1590 1590
1591 1591 if not repo.ui.configbool('experimental', 'clonebundles', False):
1592 1592 return
1593 1593
1594 1594 if pullop.heads:
1595 1595 return
1596 1596
1597 1597 if not remote.capable('clonebundles'):
1598 1598 return
1599 1599
1600 1600 res = remote._call('clonebundles')
1601 1601 entries = parseclonebundlesmanifest(res)
1602 1602
1603 1603 # TODO filter entries by supported features.
1604 1604 # TODO sort entries by user preferences.
1605 1605
1606 1606 if not entries:
1607 1607 repo.ui.note(_('no clone bundles available on remote; '
1608 1608 'falling back to regular clone\n'))
1609 1609 return
1610 1610
1611 1611 url = entries[0]['URL']
1612 1612 repo.ui.status(_('applying clone bundle from %s\n') % url)
1613 1613 if trypullbundlefromurl(repo.ui, repo, url):
1614 1614 repo.ui.status(_('finished applying clone bundle\n'))
1615 1615 # Bundle failed.
1616 1616 #
1617 1617 # We abort by default to avoid the thundering herd of
1618 1618 # clients flooding a server that was expecting expensive
1619 1619 # clone load to be offloaded.
1620 1620 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1621 1621 repo.ui.warn(_('falling back to normal clone\n'))
1622 1622 else:
1623 1623 raise error.Abort(_('error applying bundle'),
1624 1624 hint=_('consider contacting the server '
1625 1625 'operator if this error persists'))
1626 1626
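The whole clone-bundle path above is opt-in; a sample client configuration, with section and option names taken from the configbool calls above (clonebundlefallback is optional):

    [experimental]
    # Ask servers for a clone bundles manifest when cloning.
    clonebundles = true

    [ui]
    # Fall back to a regular clone instead of aborting when applying
    # the advertised bundle fails.
    clonebundlefallback = true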
1627 1627 def parseclonebundlesmanifest(s):
1628 1628 """Parses the raw text of a clone bundles manifest.
1629 1629
1630 1630 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1631 1631 to the URL and other keys are the attributes for the entry.
1632 1632 """
1633 1633 m = []
1634 1634 for line in s.splitlines():
1635 1635 fields = line.split()
1636 1636 if not fields:
1637 1637 continue
1638 1638 attrs = {'URL': fields[0]}
1639 1639 for rawattr in fields[1:]:
1640 1640 key, value = rawattr.split('=', 1)
1641 1641 attrs[urllib.unquote(key)] = urllib.unquote(value)
1642 1642
1643 1643 m.append(attrs)
1644 1644
1645 1645 return m
1646 1646
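For example, a two-entry manifest (URLs and attribute names are illustrative; BUNDLESPEC mirrors the gzip-v2 spec exercised in the test below):

    manifest = ('http://example.com/full.hg BUNDLESPEC=gzip-v2\n'
                'http://example.com/partial.hg extra=hi%20there\n')
    entries = parseclonebundlesmanifest(manifest)
    # entries[0] => {'URL': 'http://example.com/full.hg',
    #                'BUNDLESPEC': 'gzip-v2'}
    # entries[1] => {'URL': 'http://example.com/partial.hg',
    #                'extra': 'hi there'}   # values are URL-unquoted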
1647 1647 def trypullbundlefromurl(ui, repo, url):
1648 1648 """Attempt to apply a bundle from a URL."""
1649 1649 lock = repo.lock()
1650 1650 try:
1651 1651 tr = repo.transaction('bundleurl')
1652 1652 try:
1653 1653 try:
1654 1654 fh = urlmod.open(ui, url)
1655 1655 cg = readbundle(ui, fh, 'stream')
1656 changegroup.addchangegroup(repo, cg, 'clonebundles', url)
1656
1657 if isinstance(cg, bundle2.unbundle20):
1658 bundle2.processbundle(repo, cg, lambda: tr)
1659 else:
1660 changegroup.addchangegroup(repo, cg, 'clonebundles', url)
1657 1661 tr.close()
1658 1662 return True
1659 1663 except urllib2.HTTPError as e:
1660 1664 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
1661 1665 except urllib2.URLError as e:
1662 1666 ui.warn(_('error fetching bundle: %s\n') % e.reason)
1663 1667
1664 1668 return False
1665 1669 finally:
1666 1670 tr.release()
1667 1671 finally:
1668 1672 lock.release()
@@ -1,143 +1,143 b''
1 1 Set up a server
2 2
3 3 $ hg init server
4 4 $ cd server
5 5 $ cat >> .hg/hgrc << EOF
6 6 > [extensions]
7 7 > clonebundles =
8 8 > EOF
9 9
10 10 $ touch foo
11 11 $ hg -q commit -A -m 'add foo'
12 12 $ touch bar
13 13 $ hg -q commit -A -m 'add bar'
14 14
15 15 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
16 16 $ cat hg.pid >> $DAEMON_PIDS
17 17 $ cd ..
18 18
19 19 Feature disabled by default
20 20 (client should not request manifest)
21 21
22 22 $ hg clone -U http://localhost:$HGPORT feature-disabled
23 23 requesting all changes
24 24 adding changesets
25 25 adding manifests
26 26 adding file changes
27 27 added 2 changesets with 2 changes to 2 files
28 28
29 29 $ cat server/access.log
30 30 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
31 31 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
32 32 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
33 33 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
34 34
35 35 $ cat >> $HGRCPATH << EOF
36 36 > [experimental]
37 37 > clonebundles = true
38 38 > EOF
39 39
40 40 Missing manifest should not result in server lookup
41 41
42 42 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
43 43 requesting all changes
44 44 adding changesets
45 45 adding manifests
46 46 adding file changes
47 47 added 2 changesets with 2 changes to 2 files
48 48
49 49 $ tail -4 server/access.log
50 50 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
51 51 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
52 52 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
53 53 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
54 54
55 55 Empty manifest file results in retrieval
56 56 (the extension only checks if the manifest file exists)
57 57
58 58 $ touch server/.hg/clonebundles.manifest
59 59 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
60 60 no clone bundles available on remote; falling back to regular clone
61 61 requesting all changes
62 62 adding changesets
63 63 adding manifests
64 64 adding file changes
65 65 added 2 changesets with 2 changes to 2 files
66 66
67 67 Manifest file with invalid URL aborts
68 68
69 69 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
70 70 $ hg clone http://localhost:$HGPORT 404-url
71 71 applying clone bundle from http://does.not.exist/bundle.hg
72 72 error fetching bundle: [Errno -2] Name or service not known
73 73 abort: error applying bundle
74 74 (consider contacting the server operator if this error persists)
75 75 [255]
76 76
77 77 Server is not running aborts
78 78
79 79 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
80 80 $ hg clone http://localhost:$HGPORT server-not-runner
81 81 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
82 82 error fetching bundle: [Errno 111] Connection refused
83 83 abort: error applying bundle
84 84 (consider contacting the server operator if this error persists)
85 85 [255]
86 86
87 87 Server returns 404
88 88
89 89 $ python $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
90 90 $ cat http.pid >> $DAEMON_PIDS
91 91 $ hg clone http://localhost:$HGPORT running-404
92 92 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
93 93 HTTP error fetching bundle: HTTP Error 404: File not found
94 94 abort: error applying bundle
95 95 (consider contacting the server operator if this error persists)
96 96 [255]
97 97
98 98 We can override failure to fall back to regular clone
99 99
100 100 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
101 101 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
102 102 HTTP error fetching bundle: HTTP Error 404: File not found
103 103 falling back to normal clone
104 104 requesting all changes
105 105 adding changesets
106 106 adding manifests
107 107 adding file changes
108 108 added 2 changesets with 2 changes to 2 files
109 109
110 110 Bundle with partial content works
111 111
112 112 $ hg -R server bundle --type gzip --base null -r 53245c60e682 partial.hg
113 113 1 changesets found
114 114
115 115 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
116 116 $ hg clone -U http://localhost:$HGPORT partial-bundle
117 117 applying clone bundle from http://localhost:$HGPORT1/partial.hg
118 118 adding changesets
119 119 adding manifests
120 120 adding file changes
121 121 added 1 changesets with 1 changes to 1 files
122 122 finished applying clone bundle
123 123 searching for changes
124 124 adding changesets
125 125 adding manifests
126 126 adding file changes
127 127 added 1 changesets with 1 changes to 1 files
128 128
129 129 Bundle with full content works
130 130
131 $ hg -R server bundle --type gzip --base null -r tip full.hg
131 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
132 132 2 changesets found
133 133
134 134 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
135 135 $ hg clone -U http://localhost:$HGPORT full-bundle
136 136 applying clone bundle from http://localhost:$HGPORT1/full.hg
137 137 adding changesets
138 138 adding manifests
139 139 adding file changes
140 140 added 2 changesets with 2 changes to 2 files
141 141 finished applying clone bundle
142 142 searching for changes
143 143 no changes found