exchange: use pushop.repo instead of repo
Sean Farley
r26672:90df14eb default
@@ -1,1773 +1,1773 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib, urllib2
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13 import lock as lockmod
14 14 import streamclone
15 15 import sslutil
16 16 import tags
17 17 import url as urlmod
18 18
19 19 # Maps bundle compression human names to internal representation.
20 20 _bundlespeccompressions = {'none': None,
21 21 'bzip2': 'BZ',
22 22 'gzip': 'GZ',
23 23 }
24 24
25 25 # Maps bundle version human names to changegroup versions.
26 26 _bundlespeccgversions = {'v1': '01',
27 27 'v2': '02',
28 28 'bundle2': '02', #legacy
29 29 }
30 30
31 31 def parsebundlespec(repo, spec, strict=True, externalnames=False):
32 32 """Parse a bundle string specification into parts.
33 33
34 34 Bundle specifications denote a well-defined bundle/exchange format.
35 35 The content of a given specification should not change over time in
36 36 order to ensure that bundles produced by a newer version of Mercurial are
37 37 readable from an older version.
38 38
39 39 The string currently has the form:
40 40
41 41 <compression>-<type>
42 42
43 43 Where <compression> is one of the supported compression formats
44 44 and <type> is (currently) a version string.
45 45
46 46 If ``strict`` is True (the default) <compression> is required. Otherwise,
47 47 it is optional.
48 48
49 49 If ``externalnames`` is False (the default), the human-centric names will
50 50 be converted to their internal representation.
51 51
52 52 Returns a 2-tuple of (compression, version). Compression will be ``None``
53 53 if not in strict mode and a compression isn't defined.
54 54
55 55 An ``InvalidBundleSpecification`` is raised when the specification is
56 56 not syntactically well formed.
57 57
58 58 An ``UnsupportedBundleSpecification`` is raised when the compression or
59 59 bundle type/version is not recognized.
60 60
61 61 Note: this function will likely eventually return a more complex data
62 62 structure, including bundle2 part information.
63 63 """
64 64 if strict and '-' not in spec:
65 65 raise error.InvalidBundleSpecification(
66 66 _('invalid bundle specification; '
67 67 'must be prefixed with compression: %s') % spec)
68 68
69 69 if '-' in spec:
70 70 compression, version = spec.split('-', 1)
71 71
72 72 if compression not in _bundlespeccompressions:
73 73 raise error.UnsupportedBundleSpecification(
74 74 _('%s compression is not supported') % compression)
75 75
76 76 if version not in _bundlespeccgversions:
77 77 raise error.UnsupportedBundleSpecification(
78 78 _('%s is not a recognized bundle version') % version)
79 79 else:
80 80 # Value could be just the compression or just the version, in which
81 81 # case some defaults are assumed (but only when not in strict mode).
82 82 assert not strict
83 83
84 84 if spec in _bundlespeccompressions:
85 85 compression = spec
86 86 version = 'v1'
87 87 if 'generaldelta' in repo.requirements:
88 88 version = 'v2'
89 89 elif spec in _bundlespeccgversions:
90 90 compression = 'bzip2'
91 91 version = spec
92 92 else:
93 93 raise error.UnsupportedBundleSpecification(
94 94 _('%s is not a recognized bundle specification') % spec)
95 95
96 96 if not externalnames:
97 97 compression = _bundlespeccompressions[compression]
98 98 version = _bundlespeccgversions[version]
99 99 return compression, version
100 100
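# For illustration: given the two tables above, a few example specs and
# the (compression, version) pairs parsebundlespec returns for them:
#
#   parsebundlespec(repo, 'gzip-v1')   ->  ('GZ', '01')
#   parsebundlespec(repo, 'bzip2-v2')  ->  ('BZ', '02')
#   parsebundlespec(repo, 'none-v2')   ->  (None, '02')
#
# In non-strict mode a bare 'gzip' assumes version 'v1' ('v2' on
# generaldelta repos) and a bare 'v2' assumes bzip2 compression.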
101 101 def readbundle(ui, fh, fname, vfs=None):
102 102 header = changegroup.readexactly(fh, 4)
103 103
104 104 alg = None
105 105 if not fname:
106 106 fname = "stream"
107 107 if not header.startswith('HG') and header.startswith('\0'):
108 108 fh = changegroup.headerlessfixup(fh, header)
109 109 header = "HG10"
110 110 alg = 'UN'
111 111 elif vfs:
112 112 fname = vfs.join(fname)
113 113
114 114 magic, version = header[0:2], header[2:4]
115 115
116 116 if magic != 'HG':
117 117 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
118 118 if version == '10':
119 119 if alg is None:
120 120 alg = changegroup.readexactly(fh, 2)
121 121 return changegroup.cg1unpacker(fh, alg)
122 122 elif version.startswith('2'):
123 123 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
124 124 else:
125 125 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
126 126
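# For illustration, the four-byte header sniffed above is for example
# 'HG10' (changegroup-1, followed by a two-byte compression code 'UN',
# 'BZ' or 'GZ') or 'HG20' (bundle2); a headerless stream beginning with
# '\0' is fixed up and read as an uncompressed 'HG10' bundle.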
127 127 def buildobsmarkerspart(bundler, markers):
128 128 """add an obsmarker part to the bundler with <markers>
129 129
130 130 No part is created if markers is empty.
131 131 Raises ValueError if the bundler doesn't support any known obsmarker format.
132 132 """
133 133 if markers:
134 134 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
135 135 version = obsolete.commonversion(remoteversions)
136 136 if version is None:
137 137 raise ValueError('bundler does not support common obsmarker format')
138 138 stream = obsolete.encodemarkers(markers, True, version=version)
139 139 return bundler.newpart('obsmarkers', data=stream)
140 140 return None
141 141
142 142 def _canusebundle2(op):
143 143 """return true if a pull/push can use bundle2
144 144
145 145 Feel free to nuke this function when we drop the experimental option"""
146 146 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
147 147 and op.remote.capable('bundle2'))
148 148
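# A minimal sketch of the configuration read above (hgrc syntax):
#
#   [experimental]
#   bundle2-exp = True
#
# bundle2 is then used only when the remote also advertises the
# 'bundle2' capability.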
149 149
150 150 class pushoperation(object):
151 151 """A object that represent a single push operation
152 152
153 153 It purpose is to carry push related state and very common operation.
154 154
155 155 A new should be created at the beginning of each push and discarded
156 156 afterward.
157 157 """
158 158
159 159 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
160 160 bookmarks=()):
161 161 # repo we push from
162 162 self.repo = repo
163 163 self.ui = repo.ui
164 164 # repo we push to
165 165 self.remote = remote
166 166 # force option provided
167 167 self.force = force
168 168 # revs to be pushed (None is "all")
169 169 self.revs = revs
170 170 # bookmark explicitly pushed
171 171 self.bookmarks = bookmarks
172 172 # allow push of new branch
173 173 self.newbranch = newbranch
174 174 # did a local lock get acquired?
175 175 self.locallocked = None
176 176 # steps already performed
177 177 # (used to check what steps have already been performed through bundle2)
178 178 self.stepsdone = set()
179 179 # Integer version of the changegroup push result
180 180 # - None means nothing to push
181 181 # - 0 means HTTP error
182 182 # - 1 means we pushed and remote head count is unchanged *or*
183 183 # we have outgoing changesets but refused to push
184 184 # - other values as described by addchangegroup()
185 185 self.cgresult = None
186 186 # Boolean value for the bookmark push
187 187 self.bkresult = None
188 188 # discover.outgoing object (contains common and outgoing data)
189 189 self.outgoing = None
190 190 # all remote heads before the push
191 191 self.remoteheads = None
192 192 # testable as a boolean indicating if any nodes are missing locally.
193 193 self.incoming = None
194 194 # phase changes that must be pushed alongside the changesets
195 195 self.outdatedphases = None
196 196 # phase changes that must be pushed if the changeset push fails
197 197 self.fallbackoutdatedphases = None
198 198 # outgoing obsmarkers
199 199 self.outobsmarkers = set()
200 200 # outgoing bookmarks
201 201 self.outbookmarks = []
202 202 # transaction manager
203 203 self.trmanager = None
204 204 # map { pushkey partid -> callback handling failure}
205 205 # used to handle exception from mandatory pushkey part failure
206 206 self.pkfailcb = {}
207 207
208 208 @util.propertycache
209 209 def futureheads(self):
210 210 """future remote heads if the changeset push succeeds"""
211 211 return self.outgoing.missingheads
212 212
213 213 @util.propertycache
214 214 def fallbackheads(self):
215 215 """future remote heads if the changeset push fails"""
216 216 if self.revs is None:
217 217 # no target to push, all common heads are relevant
218 218 return self.outgoing.commonheads
219 219 unfi = self.repo.unfiltered()
220 220 # I want cheads = heads(::missingheads and ::commonheads)
221 221 # (missingheads is revs with secret changeset filtered out)
222 222 #
223 223 # This can be expressed as:
224 224 # cheads = ( (missingheads and ::commonheads)
225 225 # + (commonheads and ::missingheads))
226 226 # )
227 227 #
228 228 # while trying to push we already computed the following:
229 229 # common = (::commonheads)
230 230 # missing = ((commonheads::missingheads) - commonheads)
231 231 #
232 232 # We can pick:
233 233 # * missingheads part of common (::commonheads)
234 234 common = self.outgoing.common
235 235 nm = self.repo.changelog.nodemap
236 236 cheads = [node for node in self.revs if nm[node] in common]
237 237 # and
238 238 # * commonheads parents on missing
239 239 revset = unfi.set('%ln and parents(roots(%ln))',
240 240 self.outgoing.commonheads,
241 241 self.outgoing.missing)
242 242 cheads.extend(c.node() for c in revset)
243 243 return cheads
244 244
245 245 @property
246 246 def commonheads(self):
247 247 """set of all common heads after changeset bundle push"""
248 248 if self.cgresult:
249 249 return self.futureheads
250 250 else:
251 251 return self.fallbackheads
252 252
253 253 # mapping of messages used when pushing bookmarks
254 254 bookmsgmap = {'update': (_("updating bookmark %s\n"),
255 255 _('updating bookmark %s failed!\n')),
256 256 'export': (_("exporting bookmark %s\n"),
257 257 _('exporting bookmark %s failed!\n')),
258 258 'delete': (_("deleting remote bookmark %s\n"),
259 259 _('deleting remote bookmark %s failed!\n')),
260 260 }
261 261
262 262
263 263 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
264 264 '''Push outgoing changesets (limited by revs) from a local
265 265 repository to remote. Return an integer:
266 266 - None means nothing to push
267 267 - 0 means HTTP error
268 268 - 1 means we pushed and remote head count is unchanged *or*
269 269 we have outgoing changesets but refused to push
270 270 - other values as described by addchangegroup()
271 271 '''
272 272 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
273 273 if pushop.remote.local():
274 274 missing = (set(pushop.repo.requirements)
275 275 - pushop.remote.local().supported)
276 276 if missing:
277 277 msg = _("required features are not"
278 278 " supported in the destination:"
279 279 " %s") % (', '.join(sorted(missing)))
280 280 raise error.Abort(msg)
281 281
282 282 # there are two ways to push to remote repo:
283 283 #
284 284 # addchangegroup assumes local user can lock remote
285 285 # repo (local filesystem, old ssh servers).
286 286 #
287 287 # unbundle assumes local user cannot lock remote repo (new ssh
288 288 # servers, http servers).
289 289
290 290 if not pushop.remote.canpush():
291 291 raise error.Abort(_("destination does not support push"))
292 292 # get local lock as we might write phase data
293 293 localwlock = locallock = None
294 294 try:
295 295 # bundle2 push may receive a reply bundle touching bookmarks or other
296 296 # things requiring the wlock. Take it now to ensure proper ordering.
297 297 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
298 298 if _canusebundle2(pushop) and maypushback:
299 299 localwlock = pushop.repo.wlock()
300 300 locallock = pushop.repo.lock()
301 301 pushop.locallocked = True
302 302 except IOError as err:
303 303 pushop.locallocked = False
304 304 if err.errno != errno.EACCES:
305 305 raise
306 306 # source repo cannot be locked.
307 307 # We do not abort the push, but just disable the local phase
308 308 # synchronisation.
309 309 msg = 'cannot lock source repository: %s\n' % err
310 310 pushop.ui.debug(msg)
311 311 try:
312 312 if pushop.locallocked:
313 pushop.trmanager = transactionmanager(repo,
313 pushop.trmanager = transactionmanager(pushop.repo,
314 314 'push-response',
315 315 pushop.remote.url())
316 316 pushop.repo.checkpush(pushop)
317 317 lock = None
318 318 unbundle = pushop.remote.capable('unbundle')
319 319 if not unbundle:
320 320 lock = pushop.remote.lock()
321 321 try:
322 322 _pushdiscovery(pushop)
323 323 if _canusebundle2(pushop):
324 324 _pushbundle2(pushop)
325 325 _pushchangeset(pushop)
326 326 _pushsyncphase(pushop)
327 327 _pushobsolete(pushop)
328 328 _pushbookmark(pushop)
329 329 finally:
330 330 if lock is not None:
331 331 lock.release()
332 332 if pushop.trmanager:
333 333 pushop.trmanager.close()
334 334 finally:
335 335 if pushop.trmanager:
336 336 pushop.trmanager.release()
337 337 if locallock is not None:
338 338 locallock.release()
339 339 if localwlock is not None:
340 340 localwlock.release()
341 341
342 342 return pushop
343 343
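# A minimal usage sketch, assuming an existing local repo object; the
# peer URL is hypothetical:
#
#   from mercurial import hg, exchange
#   other = hg.peer(repo, {}, 'ssh://example.com/repo')
#   pushop = exchange.push(repo, other, force=False, newbranch=False)
#   if pushop.cgresult:
#       repo.ui.status('changesets pushed\n')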
344 344 # list of steps to perform discovery before push
345 345 pushdiscoveryorder = []
346 346
347 347 # Mapping between step name and function
348 348 #
349 349 # This exists to help extensions wrap steps if necessary
350 350 pushdiscoverymapping = {}
351 351
352 352 def pushdiscovery(stepname):
353 353 """decorator for function performing discovery before push
354 354
355 355 The function is added to the step -> function mapping and appended to the
356 356 list of steps. Beware that decorated functions will be added in order (this
357 357 may matter).
358 358
359 359 You can only use this decorator for a new step; if you want to wrap a step
360 360 from an extension, change the pushdiscoverymapping dictionary directly."""
361 361 def dec(func):
362 362 assert stepname not in pushdiscoverymapping
363 363 pushdiscoverymapping[stepname] = func
364 364 pushdiscoveryorder.append(stepname)
365 365 return func
366 366 return dec
367 367
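# A sketch of registering a new discovery step from an extension; the
# step name and function are hypothetical:
#
#   @pushdiscovery('mymetadata')
#   def _pushdiscoverymymetadata(pushop):
#       pushop.ui.debug('discovering mymetadata to push\n')
#
# Wrapping an existing step instead means replacing its entry in
# pushdiscoverymapping, as the docstring above notes.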
368 368 def _pushdiscovery(pushop):
369 369 """Run all discovery steps"""
370 370 for stepname in pushdiscoveryorder:
371 371 step = pushdiscoverymapping[stepname]
372 372 step(pushop)
373 373
374 374 @pushdiscovery('changeset')
375 375 def _pushdiscoverychangeset(pushop):
376 376 """discover the changeset that need to be pushed"""
377 377 fci = discovery.findcommonincoming
378 378 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
379 379 common, inc, remoteheads = commoninc
380 380 fco = discovery.findcommonoutgoing
381 381 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
382 382 commoninc=commoninc, force=pushop.force)
383 383 pushop.outgoing = outgoing
384 384 pushop.remoteheads = remoteheads
385 385 pushop.incoming = inc
386 386
387 387 @pushdiscovery('phase')
388 388 def _pushdiscoveryphase(pushop):
389 389 """discover the phase that needs to be pushed
390 390
391 391 (computed for both success and failure case for changesets push)"""
392 392 outgoing = pushop.outgoing
393 393 unfi = pushop.repo.unfiltered()
394 394 remotephases = pushop.remote.listkeys('phases')
395 395 publishing = remotephases.get('publishing', False)
396 396 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
397 397 and remotephases # server supports phases
398 398 and not pushop.outgoing.missing # no changesets to be pushed
399 399 and publishing):
400 400 # When:
401 401 # - this is a subrepo push
402 402 # - and the remote supports phases
403 403 # - and no changesets are to be pushed
404 404 # - and the remote is publishing
405 405 # We may be in the issue 3871 case!
406 406 # We drop the phase synchronisation done as a courtesy,
407 407 # in order to publish changesets that are draft locally
408 408 # but public on the remote.
409 409 remotephases = {'publishing': 'True'}
410 410 ana = phases.analyzeremotephases(pushop.repo,
411 411 pushop.fallbackheads,
412 412 remotephases)
413 413 pheads, droots = ana
414 414 extracond = ''
415 415 if not publishing:
416 416 extracond = ' and public()'
417 417 revset = 'heads((%%ln::%%ln) %s)' % extracond
418 418 # Get the list of all revs that are draft on the remote but public here.
419 419 # XXX Beware that the revset breaks if droots is not strictly
420 420 # XXX roots; we may want to ensure it is, but that is costly
421 421 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
422 422 if not outgoing.missing:
423 423 future = fallback
424 424 else:
425 425 # add changesets we are going to push as draft
426 426 #
427 427 # should not be necessary for a publishing server, but because of an
428 428 # issue fixed in xxxxx we have to do it anyway.
429 429 fdroots = list(unfi.set('roots(%ln + %ln::)',
430 430 outgoing.missing, droots))
431 431 fdroots = [f.node() for f in fdroots]
432 432 future = list(unfi.set(revset, fdroots, pushop.futureheads))
433 433 pushop.outdatedphases = future
434 434 pushop.fallbackoutdatedphases = fallback
435 435
436 436 @pushdiscovery('obsmarker')
437 437 def _pushdiscoveryobsmarkers(pushop):
438 438 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
439 439 and pushop.repo.obsstore
440 440 and 'obsolete' in pushop.remote.listkeys('namespaces')):
441 441 repo = pushop.repo
442 442 # very naive computation that can be quite expensive on big repos.
443 443 # However, evolution is currently slow on them anyway.
444 444 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
445 445 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
446 446
447 447 @pushdiscovery('bookmarks')
448 448 def _pushdiscoverybookmarks(pushop):
449 449 ui = pushop.ui
450 450 repo = pushop.repo.unfiltered()
451 451 remote = pushop.remote
452 452 ui.debug("checking for updated bookmarks\n")
453 453 ancestors = ()
454 454 if pushop.revs:
455 455 revnums = map(repo.changelog.rev, pushop.revs)
456 456 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
457 457 remotebookmark = remote.listkeys('bookmarks')
458 458
459 459 explicit = set(pushop.bookmarks)
460 460
461 461 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
462 462 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
463 463 for b, scid, dcid in advsrc:
464 464 if b in explicit:
465 465 explicit.remove(b)
466 466 if not ancestors or repo[scid].rev() in ancestors:
467 467 pushop.outbookmarks.append((b, dcid, scid))
468 468 # search added bookmark
469 469 for b, scid, dcid in addsrc:
470 470 if b in explicit:
471 471 explicit.remove(b)
472 472 pushop.outbookmarks.append((b, '', scid))
473 473 # search for overwritten bookmark
474 474 for b, scid, dcid in advdst + diverge + differ:
475 475 if b in explicit:
476 476 explicit.remove(b)
477 477 pushop.outbookmarks.append((b, dcid, scid))
478 478 # search for bookmark to delete
479 479 for b, scid, dcid in adddst:
480 480 if b in explicit:
481 481 explicit.remove(b)
482 482 # treat as "deleted locally"
483 483 pushop.outbookmarks.append((b, dcid, ''))
484 484 # identical bookmarks shouldn't get reported
485 485 for b, scid, dcid in same:
486 486 if b in explicit:
487 487 explicit.remove(b)
488 488
489 489 if explicit:
490 490 explicit = sorted(explicit)
491 491 # we should probably list all of them
492 492 ui.warn(_('bookmark %s does not exist on the local '
493 493 'or remote repository!\n') % explicit[0])
494 494 pushop.bkresult = 2
495 495
496 496 pushop.outbookmarks.sort()
497 497
498 498 def _pushcheckoutgoing(pushop):
499 499 outgoing = pushop.outgoing
500 500 unfi = pushop.repo.unfiltered()
501 501 if not outgoing.missing:
502 502 # nothing to push
503 503 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
504 504 return False
505 505 # something to push
506 506 if not pushop.force:
507 507 # if repo.obsstore == False --> no obsolete
508 508 # then, save the iteration
509 509 if unfi.obsstore:
510 510 # these messages are here for 80 char limit reasons
511 511 mso = _("push includes obsolete changeset: %s!")
512 512 mst = {"unstable": _("push includes unstable changeset: %s!"),
513 513 "bumped": _("push includes bumped changeset: %s!"),
514 514 "divergent": _("push includes divergent changeset: %s!")}
515 515 # If there is at least one obsolete or unstable
516 516 # changeset in missing, at least one of the
517 517 # missingheads will be obsolete or unstable. So
518 518 # checking heads only is ok
519 519 for node in outgoing.missingheads:
520 520 ctx = unfi[node]
521 521 if ctx.obsolete():
522 522 raise error.Abort(mso % ctx)
523 523 elif ctx.troubled():
524 524 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
525 525
526 526 # internal config: bookmarks.pushing
527 527 newbm = pushop.ui.configlist('bookmarks', 'pushing')
528 528 discovery.checkheads(unfi, pushop.remote, outgoing,
529 529 pushop.remoteheads,
530 530 pushop.newbranch,
531 531 bool(pushop.incoming),
532 532 newbm)
533 533 return True
534 534
535 535 # List of names of steps to perform for an outgoing bundle2, order matters.
536 536 b2partsgenorder = []
537 537
538 538 # Mapping between step name and function
539 539 #
540 540 # This exists to help extensions wrap steps if necessary
541 541 b2partsgenmapping = {}
542 542
543 543 def b2partsgenerator(stepname, idx=None):
544 544 """decorator for function generating bundle2 part
545 545
546 546 The function is added to the step -> function mapping and appended to the
547 547 list of steps. Beware that decorated functions will be added in order
548 548 (this may matter).
549 549
550 550 You can only use this decorator for new steps; if you want to wrap a step
551 551 from an extension, change the b2partsgenmapping dictionary directly."""
552 552 def dec(func):
553 553 assert stepname not in b2partsgenmapping
554 554 b2partsgenmapping[stepname] = func
555 555 if idx is None:
556 556 b2partsgenorder.append(stepname)
557 557 else:
558 558 b2partsgenorder.insert(idx, stepname)
559 559 return func
560 560 return dec
561 561
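# A sketch of wrapping an existing part generator from an extension, per
# the docstring above (the wrapper name is hypothetical):
#
#   origphases = b2partsgenmapping['phase']
#   def wrappedphases(pushop, bundler):
#       pushop.ui.debug('generating phase parts\n')
#       return origphases(pushop, bundler)
#   b2partsgenmapping['phase'] = wrappedphases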
562 562 def _pushb2ctxcheckheads(pushop, bundler):
563 563 """Generate race condition checking parts
564 564
565 565 Exists as an independent function to aid extensions
566 566 """
567 567 if not pushop.force:
568 568 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
569 569
570 570 @b2partsgenerator('changeset')
571 571 def _pushb2ctx(pushop, bundler):
572 572 """handle changegroup push through bundle2
573 573
574 574 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
575 575 """
576 576 if 'changesets' in pushop.stepsdone:
577 577 return
578 578 pushop.stepsdone.add('changesets')
579 579 # Send known heads to the server for race detection.
580 580 if not _pushcheckoutgoing(pushop):
581 581 return
582 582 pushop.repo.prepushoutgoinghooks(pushop.repo,
583 583 pushop.remote,
584 584 pushop.outgoing)
585 585
586 586 _pushb2ctxcheckheads(pushop, bundler)
587 587
588 588 b2caps = bundle2.bundle2caps(pushop.remote)
589 589 version = None
590 590 cgversions = b2caps.get('changegroup')
591 591 if not cgversions: # 3.1 and 3.2 ship with an empty value
592 592 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
593 593 pushop.outgoing)
594 594 else:
595 595 cgversions = [v for v in cgversions if v in changegroup.packermap]
596 596 if not cgversions:
597 597 raise ValueError(_('no common changegroup version'))
598 598 version = max(cgversions)
599 599 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
600 600 pushop.outgoing,
601 601 version=version)
602 602 cgpart = bundler.newpart('changegroup', data=cg)
603 603 if version is not None:
604 604 cgpart.addparam('version', version)
605 605 def handlereply(op):
606 606 """extract addchangegroup returns from server reply"""
607 607 cgreplies = op.records.getreplies(cgpart.id)
608 608 assert len(cgreplies['changegroup']) == 1
609 609 pushop.cgresult = cgreplies['changegroup'][0]['return']
610 610 return handlereply
611 611
612 612 @b2partsgenerator('phase')
613 613 def _pushb2phases(pushop, bundler):
614 614 """handle phase push through bundle2"""
615 615 if 'phases' in pushop.stepsdone:
616 616 return
617 617 b2caps = bundle2.bundle2caps(pushop.remote)
618 618 if 'pushkey' not in b2caps:
619 619 return
620 620 pushop.stepsdone.add('phases')
621 621 part2node = []
622 622
623 623 def handlefailure(pushop, exc):
624 624 targetid = int(exc.partid)
625 625 for partid, node in part2node:
626 626 if partid == targetid:
627 627 raise error.Abort(_('updating %s to public failed') % node)
628 628
629 629 enc = pushkey.encode
630 630 for newremotehead in pushop.outdatedphases:
631 631 part = bundler.newpart('pushkey')
632 632 part.addparam('namespace', enc('phases'))
633 633 part.addparam('key', enc(newremotehead.hex()))
634 634 part.addparam('old', enc(str(phases.draft)))
635 635 part.addparam('new', enc(str(phases.public)))
636 636 part2node.append((part.id, newremotehead))
637 637 pushop.pkfailcb[part.id] = handlefailure
638 638
639 639 def handlereply(op):
640 640 for partid, node in part2node:
641 641 partrep = op.records.getreplies(partid)
642 642 results = partrep['pushkey']
643 643 assert len(results) <= 1
644 644 msg = None
645 645 if not results:
646 646 msg = _('server ignored update of %s to public!\n') % node
647 647 elif not int(results[0]['return']):
648 648 msg = _('updating %s to public failed!\n') % node
649 649 if msg is not None:
650 650 pushop.ui.warn(msg)
651 651 return handlereply
652 652
653 653 @b2partsgenerator('obsmarkers')
654 654 def _pushb2obsmarkers(pushop, bundler):
655 655 if 'obsmarkers' in pushop.stepsdone:
656 656 return
657 657 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
658 658 if obsolete.commonversion(remoteversions) is None:
659 659 return
660 660 pushop.stepsdone.add('obsmarkers')
661 661 if pushop.outobsmarkers:
662 662 markers = sorted(pushop.outobsmarkers)
663 663 buildobsmarkerspart(bundler, markers)
664 664
665 665 @b2partsgenerator('bookmarks')
666 666 def _pushb2bookmarks(pushop, bundler):
667 667 """handle bookmark push through bundle2"""
668 668 if 'bookmarks' in pushop.stepsdone:
669 669 return
670 670 b2caps = bundle2.bundle2caps(pushop.remote)
671 671 if 'pushkey' not in b2caps:
672 672 return
673 673 pushop.stepsdone.add('bookmarks')
674 674 part2book = []
675 675 enc = pushkey.encode
676 676
677 677 def handlefailure(pushop, exc):
678 678 targetid = int(exc.partid)
679 679 for partid, book, action in part2book:
680 680 if partid == targetid:
681 681 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
682 682 # we should not be called for parts we did not generate
683 683 assert False
684 684
685 685 for book, old, new in pushop.outbookmarks:
686 686 part = bundler.newpart('pushkey')
687 687 part.addparam('namespace', enc('bookmarks'))
688 688 part.addparam('key', enc(book))
689 689 part.addparam('old', enc(old))
690 690 part.addparam('new', enc(new))
691 691 action = 'update'
692 692 if not old:
693 693 action = 'export'
694 694 elif not new:
695 695 action = 'delete'
696 696 part2book.append((part.id, book, action))
697 697 pushop.pkfailcb[part.id] = handlefailure
698 698
699 699 def handlereply(op):
700 700 ui = pushop.ui
701 701 for partid, book, action in part2book:
702 702 partrep = op.records.getreplies(partid)
703 703 results = partrep['pushkey']
704 704 assert len(results) <= 1
705 705 if not results:
706 706 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
707 707 else:
708 708 ret = int(results[0]['return'])
709 709 if ret:
710 710 ui.status(bookmsgmap[action][0] % book)
711 711 else:
712 712 ui.warn(bookmsgmap[action][1] % book)
713 713 if pushop.bkresult is not None:
714 714 pushop.bkresult = 1
715 715 return handlereply
716 716
717 717
718 718 def _pushbundle2(pushop):
719 719 """push data to the remote using bundle2
720 720
721 721 The only currently supported type of data is changegroup but this will
722 722 evolve in the future."""
723 723 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
724 724 pushback = (pushop.trmanager
725 725 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
726 726
727 727 # create reply capability
728 728 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
729 729 allowpushback=pushback))
730 730 bundler.newpart('replycaps', data=capsblob)
731 731 replyhandlers = []
732 732 for partgenname in b2partsgenorder:
733 733 partgen = b2partsgenmapping[partgenname]
734 734 ret = partgen(pushop, bundler)
735 735 if callable(ret):
736 736 replyhandlers.append(ret)
737 737 # do not push if nothing to push
738 738 if bundler.nbparts <= 1:
739 739 return
740 740 stream = util.chunkbuffer(bundler.getchunks())
741 741 try:
742 742 try:
743 743 reply = pushop.remote.unbundle(stream, ['force'], 'push')
744 744 except error.BundleValueError as exc:
745 745 raise error.Abort('missing support for %s' % exc)
746 746 try:
747 747 trgetter = None
748 748 if pushback:
749 749 trgetter = pushop.trmanager.transaction
750 750 op = bundle2.processbundle(pushop.repo, reply, trgetter)
751 751 except error.BundleValueError as exc:
752 752 raise error.Abort('missing support for %s' % exc)
753 753 except error.PushkeyFailed as exc:
754 754 partid = int(exc.partid)
755 755 if partid not in pushop.pkfailcb:
756 756 raise
757 757 pushop.pkfailcb[partid](pushop, exc)
758 758 for rephand in replyhandlers:
759 759 rephand(op)
760 760
761 761 def _pushchangeset(pushop):
762 762 """Make the actual push of changeset bundle to remote repo"""
763 763 if 'changesets' in pushop.stepsdone:
764 764 return
765 765 pushop.stepsdone.add('changesets')
766 766 if not _pushcheckoutgoing(pushop):
767 767 return
768 768 pushop.repo.prepushoutgoinghooks(pushop.repo,
769 769 pushop.remote,
770 770 pushop.outgoing)
771 771 outgoing = pushop.outgoing
772 772 unbundle = pushop.remote.capable('unbundle')
773 773 # TODO: get bundlecaps from remote
774 774 bundlecaps = None
775 775 # create a changegroup from local
776 776 if pushop.revs is None and not (outgoing.excluded
777 777 or pushop.repo.changelog.filteredrevs):
778 778 # push everything,
779 779 # use the fast path, no race possible on push
780 780 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
781 781 cg = changegroup.getsubset(pushop.repo,
782 782 outgoing,
783 783 bundler,
784 784 'push',
785 785 fastpath=True)
786 786 else:
787 787 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
788 788 bundlecaps)
789 789
790 790 # apply changegroup to remote
791 791 if unbundle:
792 792 # local repo finds heads on server, finds out what
793 793 # revs it must push. once revs transferred, if server
794 794 # finds it has different heads (someone else won
795 795 # commit/push race), server aborts.
796 796 if pushop.force:
797 797 remoteheads = ['force']
798 798 else:
799 799 remoteheads = pushop.remoteheads
800 800 # ssh: return remote's addchangegroup()
801 801 # http: return remote's addchangegroup() or 0 for error
802 802 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
803 803 pushop.repo.url())
804 804 else:
805 805 # we return an integer indicating remote head count
806 806 # change
807 807 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
808 808 pushop.repo.url())
809 809
810 810 def _pushsyncphase(pushop):
811 811 """synchronise phase information locally and remotely"""
812 812 cheads = pushop.commonheads
813 813 # even when we don't push, exchanging phase data is useful
814 814 remotephases = pushop.remote.listkeys('phases')
815 815 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
816 816 and remotephases # server supports phases
817 817 and pushop.cgresult is None # nothing was pushed
818 818 and remotephases.get('publishing', False)):
819 819 # When:
820 820 # - this is a subrepo push
821 821 # - and the remote supports phases
822 822 # - and no changesets were pushed
823 823 # - and the remote is publishing
824 824 # We may be in the issue 3871 case!
825 825 # We drop the phase synchronisation done as a courtesy,
826 826 # in order to publish changesets that are draft locally
827 827 # but public on the remote.
828 828 remotephases = {'publishing': 'True'}
829 829 if not remotephases: # old server or public-only reply from non-publishing
830 830 _localphasemove(pushop, cheads)
831 831 # don't push any phase data as there is nothing to push
832 832 else:
833 833 ana = phases.analyzeremotephases(pushop.repo, cheads,
834 834 remotephases)
835 835 pheads, droots = ana
836 836 ### Apply remote phase on local
837 837 if remotephases.get('publishing', False):
838 838 _localphasemove(pushop, cheads)
839 839 else: # publish = False
840 840 _localphasemove(pushop, pheads)
841 841 _localphasemove(pushop, cheads, phases.draft)
842 842 ### Apply local phase on remote
843 843
844 844 if pushop.cgresult:
845 845 if 'phases' in pushop.stepsdone:
846 846 # phases already pushed through bundle2
847 847 return
848 848 outdated = pushop.outdatedphases
849 849 else:
850 850 outdated = pushop.fallbackoutdatedphases
851 851
852 852 pushop.stepsdone.add('phases')
853 853
854 854 # filter heads already turned public by the push
855 855 outdated = [c for c in outdated if c.node() not in pheads]
856 856 # fallback to independent pushkey command
857 857 for newremotehead in outdated:
858 858 r = pushop.remote.pushkey('phases',
859 859 newremotehead.hex(),
860 860 str(phases.draft),
861 861 str(phases.public))
862 862 if not r:
863 863 pushop.ui.warn(_('updating %s to public failed!\n')
864 864 % newremotehead)
865 865
866 866 def _localphasemove(pushop, nodes, phase=phases.public):
867 867 """move <nodes> to <phase> in the local source repo"""
868 868 if pushop.trmanager:
869 869 phases.advanceboundary(pushop.repo,
870 870 pushop.trmanager.transaction(),
871 871 phase,
872 872 nodes)
873 873 else:
874 874 # repo is not locked, do not change any phases!
875 875 # Inform the user that phases should have been moved when
876 876 # applicable.
877 877 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
878 878 phasestr = phases.phasenames[phase]
879 879 if actualmoves:
880 880 pushop.ui.status(_('cannot lock source repo, skipping '
881 881 'local %s phase update\n') % phasestr)
882 882
883 883 def _pushobsolete(pushop):
884 884 """utility function to push obsolete markers to a remote"""
885 885 if 'obsmarkers' in pushop.stepsdone:
886 886 return
887 887 repo = pushop.repo
888 888 remote = pushop.remote
889 889 pushop.stepsdone.add('obsmarkers')
890 890 if pushop.outobsmarkers:
891 891 pushop.ui.debug('try to push obsolete markers to remote\n')
892 892 rslts = []
893 893 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
894 894 for key in sorted(remotedata, reverse=True):
895 895 # reverse sort to ensure we end with dump0
896 896 data = remotedata[key]
897 897 rslts.append(remote.pushkey('obsolete', key, '', data))
898 898 if [r for r in rslts if not r]:
899 899 msg = _('failed to push some obsolete markers!\n')
900 900 repo.ui.warn(msg)
901 901
902 902 def _pushbookmark(pushop):
903 903 """Update bookmark position on remote"""
904 904 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
905 905 return
906 906 pushop.stepsdone.add('bookmarks')
907 907 ui = pushop.ui
908 908 remote = pushop.remote
909 909
910 910 for b, old, new in pushop.outbookmarks:
911 911 action = 'update'
912 912 if not old:
913 913 action = 'export'
914 914 elif not new:
915 915 action = 'delete'
916 916 if remote.pushkey('bookmarks', b, old, new):
917 917 ui.status(bookmsgmap[action][0] % b)
918 918 else:
919 919 ui.warn(bookmsgmap[action][1] % b)
920 920 # discovery can have set the value from an invalid entry
921 921 if pushop.bkresult is not None:
922 922 pushop.bkresult = 1
923 923
924 924 class pulloperation(object):
925 925 """A object that represent a single pull operation
926 926
927 927 It purpose is to carry pull related state and very common operation.
928 928
929 929 A new should be created at the beginning of each pull and discarded
930 930 afterward.
931 931 """
932 932
933 933 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
934 934 remotebookmarks=None, streamclonerequested=None):
935 935 # repo we pull into
936 936 self.repo = repo
937 937 # repo we pull from
938 938 self.remote = remote
939 939 # revision we try to pull (None is "all")
940 940 self.heads = heads
941 941 # bookmark pulled explicitly
942 942 self.explicitbookmarks = bookmarks
943 943 # do we force pull?
944 944 self.force = force
945 945 # whether a streaming clone was requested
946 946 self.streamclonerequested = streamclonerequested
947 947 # transaction manager
948 948 self.trmanager = None
949 949 # set of common changeset between local and remote before pull
950 950 self.common = None
951 951 # set of pulled head
952 952 self.rheads = None
953 953 # list of missing changeset to fetch remotely
954 954 self.fetch = None
955 955 # remote bookmarks data
956 956 self.remotebookmarks = remotebookmarks
957 957 # result of changegroup pulling (used as return code by pull)
958 958 self.cgresult = None
960 960 # list of steps already done
960 960 self.stepsdone = set()
961 961
962 962 @util.propertycache
963 963 def pulledsubset(self):
964 964 """heads of the set of changeset target by the pull"""
965 965 # compute target subset
966 966 if self.heads is None:
967 967 # We pulled everything possible
968 968 # sync on everything common
969 969 c = set(self.common)
970 970 ret = list(self.common)
971 971 for n in self.rheads:
972 972 if n not in c:
973 973 ret.append(n)
974 974 return ret
975 975 else:
976 976 # We pulled a specific subset
977 977 # sync on this subset
978 978 return self.heads
979 979
980 980 @util.propertycache
981 981 def canusebundle2(self):
982 982 return _canusebundle2(self)
983 983
984 984 @util.propertycache
985 985 def remotebundle2caps(self):
986 986 return bundle2.bundle2caps(self.remote)
987 987
988 988 def gettransaction(self):
989 989 # deprecated; talk to trmanager directly
990 990 return self.trmanager.transaction()
991 991
992 992 class transactionmanager(object):
993 993 """An object to manage the life cycle of a transaction
994 994
995 995 It creates the transaction on demand and calls the appropriate hooks when
996 996 closing the transaction."""
997 997 def __init__(self, repo, source, url):
998 998 self.repo = repo
999 999 self.source = source
1000 1000 self.url = url
1001 1001 self._tr = None
1002 1002
1003 1003 def transaction(self):
1004 1004 """Return an open transaction object, constructing if necessary"""
1005 1005 if not self._tr:
1006 1006 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1007 1007 self._tr = self.repo.transaction(trname)
1008 1008 self._tr.hookargs['source'] = self.source
1009 1009 self._tr.hookargs['url'] = self.url
1010 1010 return self._tr
1011 1011
1012 1012 def close(self):
1013 1013 """close transaction if created"""
1014 1014 if self._tr is not None:
1015 1015 self._tr.close()
1016 1016
1017 1017 def release(self):
1018 1018 """release transaction if created"""
1019 1019 if self._tr is not None:
1020 1020 self._tr.release()
1021 1021
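# A minimal sketch of the intended life cycle, mirroring how push and
# pull drive the manager above:
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   try:
#       tr = trmanager.transaction()  # created lazily on first use
#       # ... apply incoming data within tr ...
#       trmanager.close()             # commits only if one was created
#   finally:
#       trmanager.release()           # aborts the transaction if still open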
1022 1022 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1023 1023 streamclonerequested=None):
1024 1024 """Fetch repository data from a remote.
1025 1025
1026 1026 This is the main function used to retrieve data from a remote repository.
1027 1027
1028 1028 ``repo`` is the local repository to clone into.
1029 1029 ``remote`` is a peer instance.
1030 1030 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1031 1031 default) means to pull everything from the remote.
1032 1032 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1033 1033 default, all remote bookmarks are pulled.
1034 1034 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1035 1035 initialization.
1036 1036 ``streamclonerequested`` is a boolean indicating whether a "streaming
1037 1037 clone" is requested. A "streaming clone" is essentially a raw file copy
1038 1038 of revlogs from the server. This only works when the local repository is
1039 1039 empty. The default value of ``None`` means to respect the server
1040 1040 configuration for preferring stream clones.
1041 1041
1042 1042 Returns the ``pulloperation`` created for this pull.
1043 1043 """
1044 1044 if opargs is None:
1045 1045 opargs = {}
1046 1046 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1047 1047 streamclonerequested=streamclonerequested, **opargs)
1048 1048 if pullop.remote.local():
1049 1049 missing = set(pullop.remote.requirements) - pullop.repo.supported
1050 1050 if missing:
1051 1051 msg = _("required features are not"
1052 1052 " supported in the destination:"
1053 1053 " %s") % (', '.join(sorted(missing)))
1054 1054 raise error.Abort(msg)
1055 1055
1056 1056 lock = pullop.repo.lock()
1057 1057 try:
1058 1058 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1059 1059 streamclone.maybeperformlegacystreamclone(pullop)
1060 1060 # This should ideally be in _pullbundle2(). However, it needs to run
1061 1061 # before discovery to avoid extra work.
1062 1062 _maybeapplyclonebundle(pullop)
1063 1063 _pulldiscovery(pullop)
1064 1064 if pullop.canusebundle2:
1065 1065 _pullbundle2(pullop)
1066 1066 _pullchangeset(pullop)
1067 1067 _pullphase(pullop)
1068 1068 _pullbookmarks(pullop)
1069 1069 _pullobsolete(pullop)
1070 1070 pullop.trmanager.close()
1071 1071 finally:
1072 1072 pullop.trmanager.release()
1073 1073 lock.release()
1074 1074
1075 1075 return pullop
1076 1076
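# A minimal usage sketch, assuming an existing local repo object; the
# peer URL is hypothetical:
#
#   from mercurial import hg, exchange
#   other = hg.peer(repo, {}, 'https://example.com/repo')
#   pullop = exchange.pull(repo, other, heads=None)
#   if pullop.cgresult:
#       repo.ui.status('changegroup added\n')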
1077 1077 # list of steps to perform discovery before pull
1078 1078 pulldiscoveryorder = []
1079 1079
1080 1080 # Mapping between step name and function
1081 1081 #
1082 1082 # This exists to help extensions wrap steps if necessary
1083 1083 pulldiscoverymapping = {}
1084 1084
1085 1085 def pulldiscovery(stepname):
1086 1086 """decorator for function performing discovery before pull
1087 1087
1088 1088 The function is added to the step -> function mapping and appended to the
1089 1089 list of steps. Beware that decorated functions will be added in order (this
1090 1090 may matter).
1091 1091
1092 1092 You can only use this decorator for a new step; if you want to wrap a step
1093 1093 from an extension, change the pulldiscoverymapping dictionary directly."""
1094 1094 def dec(func):
1095 1095 assert stepname not in pulldiscoverymapping
1096 1096 pulldiscoverymapping[stepname] = func
1097 1097 pulldiscoveryorder.append(stepname)
1098 1098 return func
1099 1099 return dec
1100 1100
1101 1101 def _pulldiscovery(pullop):
1102 1102 """Run all discovery steps"""
1103 1103 for stepname in pulldiscoveryorder:
1104 1104 step = pulldiscoverymapping[stepname]
1105 1105 step(pullop)
1106 1106
1107 1107 @pulldiscovery('b1:bookmarks')
1108 1108 def _pullbookmarkbundle1(pullop):
1109 1109 """fetch bookmark data in bundle1 case
1110 1110
1111 1111 If not using bundle2, we have to fetch bookmarks before changeset
1112 1112 discovery to reduce the chance and impact of race conditions."""
1113 1113 if pullop.remotebookmarks is not None:
1114 1114 return
1115 1115 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1116 1116 # all known bundle2 servers now support listkeys, but let's be nice with
1117 1117 # new implementations.
1118 1118 return
1119 1119 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1120 1120
1121 1121
1122 1122 @pulldiscovery('changegroup')
1123 1123 def _pulldiscoverychangegroup(pullop):
1124 1124 """discovery phase for the pull
1125 1125
1126 1126 Currently handles changeset discovery only; will change to handle all
1127 1127 discovery at some point."""
1128 1128 tmp = discovery.findcommonincoming(pullop.repo,
1129 1129 pullop.remote,
1130 1130 heads=pullop.heads,
1131 1131 force=pullop.force)
1132 1132 common, fetch, rheads = tmp
1133 1133 nm = pullop.repo.unfiltered().changelog.nodemap
1134 1134 if fetch and rheads:
1135 1135 # If a remote head is filtered locally, let's drop it from the unknown
1136 1136 # remote heads and put it back in common.
1137 1137 #
1138 1138 # This is a hackish solution to catch most of the "common but locally
1139 1139 # hidden" situations. We do not perform discovery on the unfiltered
1140 1140 # repository because it ends up doing a pathological amount of round
1141 1141 # trips for a huge amount of changesets we do not care about.
1142 1142 #
1143 1143 # If a set of such "common but filtered" changesets exists on the server
1144 1144 # but does not include a remote head, we'll not be able to detect it.
1145 1145 scommon = set(common)
1146 1146 filteredrheads = []
1147 1147 for n in rheads:
1148 1148 if n in nm:
1149 1149 if n not in scommon:
1150 1150 common.append(n)
1151 1151 else:
1152 1152 filteredrheads.append(n)
1153 1153 if not filteredrheads:
1154 1154 fetch = []
1155 1155 rheads = filteredrheads
1156 1156 pullop.common = common
1157 1157 pullop.fetch = fetch
1158 1158 pullop.rheads = rheads
1159 1159
1160 1160 def _pullbundle2(pullop):
1161 1161 """pull data using bundle2
1162 1162
1163 1163 For now, the only supported data are changegroups."""
1164 1164 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1165 1165
1166 1166 streaming, streamreqs = streamclone.canperformstreamclone(pullop)
1167 1167
1168 1168 # pulling changegroup
1169 1169 pullop.stepsdone.add('changegroup')
1170 1170
1171 1171 kwargs['common'] = pullop.common
1172 1172 kwargs['heads'] = pullop.heads or pullop.rheads
1173 1173 kwargs['cg'] = pullop.fetch
1174 1174 if 'listkeys' in pullop.remotebundle2caps:
1175 1175 kwargs['listkeys'] = ['phase']
1176 1176 if pullop.remotebookmarks is None:
1177 1177 # make sure to always include bookmark data when migrating
1178 1178 # `hg incoming --bundle` to using this function.
1179 1179 kwargs['listkeys'].append('bookmarks')
1180 1180 if streaming:
1181 1181 pullop.repo.ui.status(_('streaming all changes\n'))
1182 1182 elif not pullop.fetch:
1183 1183 pullop.repo.ui.status(_("no changes found\n"))
1184 1184 pullop.cgresult = 0
1185 1185 else:
1186 1186 if pullop.heads is None and list(pullop.common) == [nullid]:
1187 1187 pullop.repo.ui.status(_("requesting all changes\n"))
1188 1188 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1189 1189 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1190 1190 if obsolete.commonversion(remoteversions) is not None:
1191 1191 kwargs['obsmarkers'] = True
1192 1192 pullop.stepsdone.add('obsmarkers')
1193 1193 _pullbundle2extraprepare(pullop, kwargs)
1194 1194 bundle = pullop.remote.getbundle('pull', **kwargs)
1195 1195 try:
1196 1196 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1197 1197 except error.BundleValueError as exc:
1198 1198 raise error.Abort('missing support for %s' % exc)
1199 1199
1200 1200 if pullop.fetch:
1201 1201 results = [cg['return'] for cg in op.records['changegroup']]
1202 1202 pullop.cgresult = changegroup.combineresults(results)
1203 1203
1204 1204 # processing phase changes
1205 1205 for namespace, value in op.records['listkeys']:
1206 1206 if namespace == 'phases':
1207 1207 _pullapplyphases(pullop, value)
1208 1208
1209 1209 # processing bookmark updates
1210 1210 for namespace, value in op.records['listkeys']:
1211 1211 if namespace == 'bookmarks':
1212 1212 pullop.remotebookmarks = value
1213 1213
1214 1214 # bookmark data were either already there or pulled in the bundle
1215 1215 if pullop.remotebookmarks is not None:
1216 1216 _pullbookmarks(pullop)
1217 1217
1218 1218 def _pullbundle2extraprepare(pullop, kwargs):
1219 1219 """hook function so that extensions can extend the getbundle call"""
1220 1220 pass
1221 1221
1222 1222 def _pullchangeset(pullop):
1223 1223 """pull changeset from unbundle into the local repo"""
1224 1224 # We delay opening the transaction as long as possible so we
1225 1225 # don't open a transaction for nothing and don't break future useful
1226 1226 # rollback calls
1227 1227 if 'changegroup' in pullop.stepsdone:
1228 1228 return
1229 1229 pullop.stepsdone.add('changegroup')
1230 1230 if not pullop.fetch:
1231 1231 pullop.repo.ui.status(_("no changes found\n"))
1232 1232 pullop.cgresult = 0
1233 1233 return
1234 1234 pullop.gettransaction()
1235 1235 if pullop.heads is None and list(pullop.common) == [nullid]:
1236 1236 pullop.repo.ui.status(_("requesting all changes\n"))
1237 1237 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1238 1238 # issue1320, avoid a race if remote changed after discovery
1239 1239 pullop.heads = pullop.rheads
1240 1240
1241 1241 if pullop.remote.capable('getbundle'):
1242 1242 # TODO: get bundlecaps from remote
1243 1243 cg = pullop.remote.getbundle('pull', common=pullop.common,
1244 1244 heads=pullop.heads or pullop.rheads)
1245 1245 elif pullop.heads is None:
1246 1246 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1247 1247 elif not pullop.remote.capable('changegroupsubset'):
1248 1248 raise error.Abort(_("partial pull cannot be done because "
1249 1249 "other repository doesn't support "
1250 1250 "changegroupsubset."))
1251 1251 else:
1252 1252 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1253 1253 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1254 1254 pullop.remote.url())
1255 1255
1256 1256 def _pullphase(pullop):
1257 1257 # Get remote phases data from remote
1258 1258 if 'phases' in pullop.stepsdone:
1259 1259 return
1260 1260 remotephases = pullop.remote.listkeys('phases')
1261 1261 _pullapplyphases(pullop, remotephases)
1262 1262
1263 1263 def _pullapplyphases(pullop, remotephases):
1264 1264 """apply phase movement from observed remote state"""
1265 1265 if 'phases' in pullop.stepsdone:
1266 1266 return
1267 1267 pullop.stepsdone.add('phases')
1268 1268 publishing = bool(remotephases.get('publishing', False))
1269 1269 if remotephases and not publishing:
1270 1270 # remote is new and non-publishing
1271 1271 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1272 1272 pullop.pulledsubset,
1273 1273 remotephases)
1274 1274 dheads = pullop.pulledsubset
1275 1275 else:
1276 1276 # Remote is old or publishing; all common changesets
1277 1277 # should be seen as public
1278 1278 pheads = pullop.pulledsubset
1279 1279 dheads = []
1280 1280 unfi = pullop.repo.unfiltered()
1281 1281 phase = unfi._phasecache.phase
1282 1282 rev = unfi.changelog.nodemap.get
1283 1283 public = phases.public
1284 1284 draft = phases.draft
1285 1285
1286 1286 # exclude changesets already public locally and update the others
1287 1287 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1288 1288 if pheads:
1289 1289 tr = pullop.gettransaction()
1290 1290 phases.advanceboundary(pullop.repo, tr, public, pheads)
1291 1291
1292 1292 # exclude changesets already draft locally and update the others
1293 1293 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1294 1294 if dheads:
1295 1295 tr = pullop.gettransaction()
1296 1296 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1297 1297
1298 1298 def _pullbookmarks(pullop):
1299 1299 """process the remote bookmark information to update the local one"""
1300 1300 if 'bookmarks' in pullop.stepsdone:
1301 1301 return
1302 1302 pullop.stepsdone.add('bookmarks')
1303 1303 repo = pullop.repo
1304 1304 remotebookmarks = pullop.remotebookmarks
1305 1305 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1306 1306 pullop.remote.url(),
1307 1307 pullop.gettransaction,
1308 1308 explicit=pullop.explicitbookmarks)
1309 1309
1310 1310 def _pullobsolete(pullop):
1311 1311 """utility function to pull obsolete markers from a remote
1312 1312
1313 1313 The `gettransaction` argument is a function that returns the pull
1314 1314 transaction, creating one if necessary. We return the transaction to inform
1315 1315 the calling code that a new transaction has been created (when applicable).
1316 1316
1317 1317 Exists mostly to allow overriding for experimentation purposes"""
1318 1318 if 'obsmarkers' in pullop.stepsdone:
1319 1319 return
1320 1320 pullop.stepsdone.add('obsmarkers')
1321 1321 tr = None
1322 1322 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1323 1323 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1324 1324 remoteobs = pullop.remote.listkeys('obsolete')
1325 1325 if 'dump0' in remoteobs:
1326 1326 tr = pullop.gettransaction()
1327 1327 for key in sorted(remoteobs, reverse=True):
1328 1328 if key.startswith('dump'):
1329 1329 data = base85.b85decode(remoteobs[key])
1330 1330 pullop.repo.obsstore.mergemarkers(tr, data)
1331 1331 pullop.repo.invalidatevolatilesets()
1332 1332 return tr
1333 1333
1334 1334 def caps20to10(repo):
1335 1335 """return a set with appropriate options to use bundle20 during getbundle"""
1336 1336 caps = set(['HG20'])
1337 1337 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1338 1338 caps.add('bundle2=' + urllib.quote(capsblob))
1339 1339 return caps
1340 1340
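# For illustration, the returned set looks roughly like
#
#   set(['HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'])
#
# i.e. the literal 'HG20' marker plus the repo's bundle2 capability blob,
# URL-quoted after the 'bundle2=' prefix (the exact blob varies by repo).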
1341 1341 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1342 1342 getbundle2partsorder = []
1343 1343
1344 1344 # Mapping between step name and function
1345 1345 #
1346 1346 # This exists to help extensions wrap steps if necessary
1347 1347 getbundle2partsmapping = {}
1348 1348
1349 1349 def getbundle2partsgenerator(stepname, idx=None):
1350 1350 """decorator for function generating bundle2 part for getbundle
1351 1351
1352 1352 The function is added to the step -> function mapping and appended to the
1353 1353 list of steps. Beware that decorated functions will be added in order
1354 1354 (this may matter).
1355 1355
1356 1356 You can only use this decorator for new steps; if you want to wrap a step
1357 1357 from an extension, change the getbundle2partsmapping dictionary directly."""
1358 1358 def dec(func):
1359 1359 assert stepname not in getbundle2partsmapping
1360 1360 getbundle2partsmapping[stepname] = func
1361 1361 if idx is None:
1362 1362 getbundle2partsorder.append(stepname)
1363 1363 else:
1364 1364 getbundle2partsorder.insert(idx, stepname)
1365 1365 return func
1366 1366 return dec
1367 1367
1368 1368 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1369 1369 **kwargs):
1370 1370 """return a full bundle (with potentially multiple kind of parts)
1371 1371
1372 1372 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1373 1373 passed. For now, the bundle can contain only changegroup, but this will
1374 1374 change when more part types become available for bundle2.
1375 1375
1376 1376 This is different from changegroup.getchangegroup that only returns an HG10
1377 1377 changegroup bundle. They may eventually get reunited in the future when we
1378 1378 have a clearer idea of the API we want to use to query different data.
1379 1379
1380 1380 The implementation is at a very early stage and will get massive rework
1381 1381 when the API of bundle is refined.
1382 1382 """
1383 1383 # bundle10 case
1384 1384 usebundle2 = False
1385 1385 if bundlecaps is not None:
1386 1386 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1387 1387 if not usebundle2:
1388 1388 if bundlecaps and not kwargs.get('cg', True):
1389 1389 raise ValueError(_('request for bundle10 must include changegroup'))
1390 1390
1391 1391 if kwargs:
1392 1392 raise ValueError(_('unsupported getbundle arguments: %s')
1393 1393 % ', '.join(sorted(kwargs.keys())))
1394 1394 return changegroup.getchangegroup(repo, source, heads=heads,
1395 1395 common=common, bundlecaps=bundlecaps)
1396 1396
1397 1397 # bundle20 case
1398 1398 b2caps = {}
1399 1399 for bcaps in bundlecaps:
1400 1400 if bcaps.startswith('bundle2='):
1401 1401 blob = urllib.unquote(bcaps[len('bundle2='):])
1402 1402 b2caps.update(bundle2.decodecaps(blob))
1403 1403 bundler = bundle2.bundle20(repo.ui, b2caps)
1404 1404
1405 1405 kwargs['heads'] = heads
1406 1406 kwargs['common'] = common
1407 1407
1408 1408 for name in getbundle2partsorder:
1409 1409 func = getbundle2partsmapping[name]
1410 1410 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1411 1411 **kwargs)
1412 1412
1413 1413 return util.chunkbuffer(bundler.getchunks())
1414 1414
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        getcgkwargs = {}
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = getcgkwargs['version'] = max(cgversions)
        outgoing = changegroup.computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                **getcgkwargs)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)

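# Illustrative sketch (not part of the original module): version negotiation
# above intersects the client's advertised changegroup versions with the
# packers this server knows, then picks the highest, e.g.
#
#   b2caps = {'changegroup': ['01', '02']}
#   # -> cgversions == ['01', '02'] and version == '02'
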
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

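# Illustrative sketch (not part of the original module): for the common
# pushkey namespaces the part payload is built roughly like this (the
# bookmark name and value are hypothetical):
#
#   keys = repo.listkeys('bookmarks').items()  # e.g. [('@', '<hex node>')]
#   blob = pushkey.encodekeys(keys)            # newline-separated 'key\tvalue'
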
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of a 20 byte changeset node and the raw
    .hgtags filenode value.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)

    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there would likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) and b) we don't want to send
    # the part at all if we have no entries, and knowing whether we have
    # entries requires cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))

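# Illustrative sketch (not part of the original module): since the part data
# is a flat sequence of fixed 40-byte records, a receiver can walk it like
#
#   for offset in xrange(0, len(data), 40):
#       node = data[offset:offset + 20]
#       fnode = data[offset + 20:offset + 40]
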
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peers for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

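# Illustrative sketch (not part of the original module): a pushing client
# that supports the hashed form sends the compact variant checked above,
# computed over the remote heads it observed, roughly:
#
#   digest = util.sha1(''.join(sorted(remoteheads))).digest()
#   their_heads = ['hashed', digest]
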
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            lockandtr[1] = repo.lock()
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

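# Illustrative sketch (not part of the original module): the hook arguments
# installed on the transaction above surface in hook environments, so with
# hgrc such as (hook name and module hypothetical)
#
#   [hooks]
#   pretxnchangegroup.check = python:myhooks.check
#
# the hook should see HG_SOURCE, HG_URL and HG_BUNDLE2=1.
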
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('experimental', 'clonebundles', False):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')
    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('consider contacting the server '
                                 'operator if this error persists'))

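# Illustrative sketch (not part of the original module): the client-side
# switches read above, expressed as hgrc text (values hypothetical):
#
#   [experimental]
#   clonebundles = true
#
#   [ui]
#   clonebundlefallback = false
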
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. Each dict has a ``URL`` key corresponding to
    the entry's URL; the remaining keys are the entry's attributes.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version = parsebundlespec(repo, value,
                                                    externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

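# Illustrative sketch (not part of the original module): a manifest line
# such as (URL and attributes hypothetical)
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# would be parsed into roughly
#
#   [{'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#     'COMPRESSION': 'gzip', 'VERSION': 'v2', 'REQUIRESNI': 'true'}]
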
def filterclonebundleentries(repo, entries):
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

def sortclonebundleentries(ui, entries):
    # experimental config: experimental.clonebundleprefers
    prefers = ui.configlist('experimental', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    # Our sort function.
    def compareentry(a, b):
        for prefkey, prefvalue in prefers:
            avalue = a.get(prefkey)
            bvalue = b.get(prefkey)

            # Special case: b is missing the attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case: a is missing the attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless the attribute is present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values fall back to the next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to the next attribute.
            continue

        # If we get here, we couldn't sort by attributes or prefers. Fall
        # back to index order.
        return 0

    return sorted(entries, cmp=compareentry)

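# Illustrative sketch (not part of the original module): with hgrc such as
# (values hypothetical)
#
#   [experimental]
#   clonebundleprefers = VERSION=v2, COMPRESSION=gzip
#
# entries whose VERSION is exactly 'v2' sort first; ties are then broken by
# COMPRESSION=gzip, and anything still equal keeps its manifest order.
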
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                else:
                    changegroup.addchangegroup(repo, cg, 'clonebundles', url)
                tr.close()
                return True
            except urllib2.HTTPError as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urllib2.URLError as e:
                ui.warn(_('error fetching bundle: %s\n') % e.reason)

            return False
        finally:
            tr.release()
    finally:
        lock.release()