revbranchcache: add the necessary bit to send 'rbc' data over bundle2...
Boris Feld
r36984:c0e90df1 default
@@ -1,2272 +1,2294 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import errno
import hashlib

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    logexchange,
    obsolete,
    phases,
    pushkey,
    pycompat,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)

urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    if not externalnames:
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params

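# Illustrative sketch (not part of the original module): how a few common
# bundlespec strings parse with the default arguments. The exact results
# assume the stock 'gzip', 'bzip2' and 'none' compression engines are
# registered, which is the case in a default install.
#
#   >>> parsebundlespec(repo, 'gzip-v2')
#   ('GZ', '02', {})
#   >>> parsebundlespec(repo, 'none-packed1;requirements=revlogv1')
#   ('UN', 's1', {'requirements': 'revlogv1'})
#   >>> parsebundlespec(repo, 'v2', strict=False)   # compression optional
#   ('BZ', '02', {})
#   >>> parsebundlespec(repo, 'v2')                 # strict needs a prefix
#   Traceback (most recent call last):
#       ...
#   InvalidBundleSpecification: ...
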
def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))

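# For reference, the 4-byte headers dispatched above (an illustrative
# summary derived from the branches of readbundle, not a normative list):
#
#   'HG10' - bundle1; followed by a 2-byte compression code ('GZ', 'BZ'
#            or 'UN'), unless the stream was headerless and fixed up above
#   'HG2x' - bundle2; handed to bundle2.getunbundler()
#   'HGS1' - stream clone bundle; handed to streamclone.streamcloneapplier()
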
def _formatrequirementsspec(requirements):
    return urlreq.quote(','.join(sorted(requirements)))

def _formatrequirementsparams(requirements):
    requirements = _formatrequirementsspec(requirements)
    params = "%s%s" % (urlreq.quote("requirements="), requirements)
    return params

def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked while reading, and the original seek
    position is not restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        return 'none-packed1;%s' % _formatrequirementsparams(requirements)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)

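# Minimal usage sketch (illustrative; 'bundlepath' is a hypothetical local
# file created with e.g. `hg bundle`):
#
#   >>> with open(bundlepath, 'rb') as fh:
#   ...     getbundlespec(ui, fh)
#   'bzip2-v2'   # for example; depends on how the bundle was generated
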
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    # The goal of this config option is to let developers choose the bundle
    # version used during exchange. This is especially handy during tests.
    # Value is a list of bundle versions to pick from; the highest version
    # should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')

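# For example, a test can force the legacy protocol with this hgrc snippet
# (illustrative):
#
#   [devel]
#   legacy.exchange = bundle1
#
# Listing both 'bundle1' and 'bundle2' keeps bundle2 preferred, since
# having 'bundle2' in the list disables the forcing above.
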
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have already been performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push; all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

# mapping of messages used when pushing bookmarks
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop

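# Minimal calling sketch (illustrative; the URL is hypothetical and error
# handling is omitted):
#
#   from mercurial import hg
#   other = hg.peer(repo, {}, 'https://example.com/repo')
#   pushop = push(repo, other, revs=[repo['.'].node()])
#   if pushop.cgresult == 0:
#       repo.ui.warn('push failed\n')
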
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec

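# An extension that wants to wrap an existing discovery step changes the
# mapping directly, for example (illustrative sketch):
#
#   origstep = pushdiscoverymapping['changeset']
#   def wrappedstep(pushop):
#       # custom pre-discovery logic would go here
#       origstep(pushop)
#   pushdiscoverymapping['changeset'] = wrappedstep
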
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # We may be in the issue 3871 case!
        # We skip the courtesy phase synchronisation that would otherwise
        # publish changesets that are still draft locally.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs that are draft on the remote but public here.
    # XXX Beware that the revset breaks if droots are not strictly
    # XXX roots; we may want to ensure they are, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds the changesets we are going to push as draft
        #
        # should not be necessary for a publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation that can be quite expensive on big repos.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        return [(b, safehex(scid), safehex(dcid))
                for (b, scid, dcid) in bookmarks]

    comp = [hexifycompbookmarks(marks) for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)

def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """decide which bookmarks to push, based on the comparison with the remote

    Exists to help extensions that want to alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search for added bookmarks
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmarks
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(_('bookmark %s does not exist on the local '
                         'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore is empty there are no obsolete changesets,
        # so we can skip the iteration entirely
        if unfi.obsstore:
            # these messages are defined here to stay within the 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If there is at least one obsolete or unstable changeset in
            # missing, then at least one of the missing heads will be
            # obsolete or unstable. So checking heads only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

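# A new part generator registers itself through the decorator; ``idx``
# lets it run before existing steps. Illustrative sketch (the part name
# is hypothetical):
#
#   @b2partsgenerator('my-ext-part', idx=0)
#   def _pushb2myextpart(pushop, bundler):
#       if 'my-ext-part' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('my-ext-part')
#       bundler.newpart('my-ext-part', data='...', mandatory=False)
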
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * with 'force', do not check for push races;
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)

def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(pushop.outgoing.missing
                or pushop.outdatedphases
                or pushop.outobsmarkers
                or pushop.outbookmarks)

@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = 'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    data = []
    for book, old, new in pushop.outbookmarks:
        old = bin(old)
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('check:bookmarks', data=checkdata)

@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions: # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return 'export'
    elif not new:
        return 'delete'
    return 'update'

def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply

def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # the local repo finds heads on the server, and figures out which
    # revs it must push. Once the revs are transferred, if the server
    # finds it has different heads (someone else won a commit/push
    # race), the server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in the issue 3871 case!
        # We skip the courtesy phase synchronisation that would otherwise
        # publish changesets that are still draft locally.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public-only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fall back to the independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      ('%d' % phases.draft),
                                      ('%d' % phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery may have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

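# Typical life cycle, as used by pull() just below (an illustrative
# condensation):
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   with trmanager:                    # util.transactional runs close()
#       tr = trmanager.transaction()   # created lazily on first use
#       ...                            # apply incoming data under tr
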
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           **pycompat.strkwargs(opargs))

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop

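# Minimal calling sketch (illustrative; the source URL is hypothetical):
#
#   from mercurial import hg
#   other = hg.peer(repo, {}, 'https://example.com/repo')
#   pullop = pull(repo, other)   # pull everything, like a plain `hg pull`
#   if pullop.cgresult:          # nonzero when a changegroup was added
#       repo.ui.status('new changesets pulled\n')
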
1384 1384 # list of steps to perform discovery before pull
1385 1385 pulldiscoveryorder = []
1386 1386
1387 1387 # Mapping between step name and function
1388 1388 #
1389 1389 # This exists to help extensions wrap steps if necessary
1390 1390 pulldiscoverymapping = {}
1391 1391
1392 1392 def pulldiscovery(stepname):
1393 1393 """decorator for function performing discovery before pull
1394 1394
1395 1395 The function is added to the step -> function mapping and appended to the
1396 1396 list of steps. Beware that decorated function will be added in order (this
1397 1397 may matter).
1398 1398
1399 1399 You can only use this decorator for a new step, if you want to wrap a step
1400 1400 from an extension, change the pulldiscovery dictionary directly."""
1401 1401 def dec(func):
1402 1402 assert stepname not in pulldiscoverymapping
1403 1403 pulldiscoverymapping[stepname] = func
1404 1404 pulldiscoveryorder.append(stepname)
1405 1405 return func
1406 1406 return dec
1407 1407
1408 1408 def _pulldiscovery(pullop):
1409 1409 """Run all discovery steps"""
1410 1410 for stepname in pulldiscoveryorder:
1411 1411 step = pulldiscoverymapping[stepname]
1412 1412 step(pullop)
1413 1413
1414 1414 @pulldiscovery('b1:bookmarks')
1415 1415 def _pullbookmarkbundle1(pullop):
1416 1416 """fetch bookmark data in bundle1 case
1417 1417
1418 1418 If not using bundle2, we have to fetch bookmarks before changeset
1419 1419 discovery to reduce the chance and impact of race conditions."""
1420 1420 if pullop.remotebookmarks is not None:
1421 1421 return
1422 1422 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1423 1423 # all known bundle2 servers now support listkeys, but lets be nice with
1424 1424 # new implementation.
1425 1425 return
1426 1426 books = pullop.remote.listkeys('bookmarks')
1427 1427 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1428 1428
1429 1429
1430 1430 @pulldiscovery('changegroup')
1431 1431 def _pulldiscoverychangegroup(pullop):
1432 1432 """discovery phase for the pull
1433 1433
1434 1434 Current handle changeset discovery only, will change handle all discovery
1435 1435 at some point."""
1436 1436 tmp = discovery.findcommonincoming(pullop.repo,
1437 1437 pullop.remote,
1438 1438 heads=pullop.heads,
1439 1439 force=pullop.force)
1440 1440 common, fetch, rheads = tmp
1441 1441 nm = pullop.repo.unfiltered().changelog.nodemap
1442 1442 if fetch and rheads:
1443 1443 # If a remote heads is filtered locally, put in back in common.
1444 1444 #
1445 1445 # This is a hackish solution to catch most of "common but locally
1446 1446 # hidden situation". We do not performs discovery on unfiltered
1447 1447 # repository because it end up doing a pathological amount of round
1448 1448 # trip for w huge amount of changeset we do not care about.
1449 1449 #
1450 1450 # If a set of such "common but filtered" changeset exist on the server
1451 1451 # but are not including a remote heads, we'll not be able to detect it,
1452 1452 scommon = set(common)
1453 1453 for n in rheads:
1454 1454 if n in nm:
1455 1455 if n not in scommon:
1456 1456 common.append(n)
1457 1457 if set(rheads).issubset(set(common)):
1458 1458 fetch = []
1459 1459 pullop.common = common
1460 1460 pullop.fetch = fetch
1461 1461 pullop.rheads = rheads
1462 1462
1463 1463 def _pullbundle2(pullop):
1464 1464 """pull data using bundle2
1465 1465
1466 1466 For now, the only supported data is the changegroup."""
1467 1467 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1468 1468
1469 1469 # make ui easier to access
1470 1470 ui = pullop.repo.ui
1471 1471
1472 1472 # Check whether a stream clone can and should be performed over bundle2;
1473 1473 # if so, the regular changegroup pull is skipped below.
1474 1474 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1475 1475
1476 1476 # declare the pull perimeter (common and heads)
1477 1477 kwargs['common'] = pullop.common
1478 1478 kwargs['heads'] = pullop.heads or pullop.rheads
1479 1479
1480 1480 if streaming:
1481 1481 kwargs['cg'] = False
1482 1482 kwargs['stream'] = True
1483 1483 pullop.stepsdone.add('changegroup')
1484 1484 pullop.stepsdone.add('phases')
1485 1485
1486 1486 else:
1487 1487 # pulling changegroup
1488 1488 pullop.stepsdone.add('changegroup')
1489 1489
1490 1490 kwargs['cg'] = pullop.fetch
1491 1491
1492 1492 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1493 1493 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1494 1494 if (not legacyphase and hasbinaryphase):
1495 1495 kwargs['phases'] = True
1496 1496 pullop.stepsdone.add('phases')
1497 1497
1498 1498 if 'listkeys' in pullop.remotebundle2caps:
1499 1499 if 'phases' not in pullop.stepsdone:
1500 1500 kwargs['listkeys'] = ['phases']
1501 1501
1502 1502 bookmarksrequested = False
1503 1503 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1504 1504 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1505 1505
1506 1506 if pullop.remotebookmarks is not None:
1507 1507 pullop.stepsdone.add('request-bookmarks')
1508 1508
1509 1509 if ('request-bookmarks' not in pullop.stepsdone
1510 1510 and pullop.remotebookmarks is None
1511 1511 and not legacybookmark and hasbinarybook):
1512 1512 kwargs['bookmarks'] = True
1513 1513 bookmarksrequested = True
1514 1514
1515 1515 if 'listkeys' in pullop.remotebundle2caps:
1516 1516 if 'request-bookmarks' not in pullop.stepsdone:
1517 1517 # make sure to always include bookmark data when migrating
1518 1518 # `hg incoming --bundle` to using this function.
1519 1519 pullop.stepsdone.add('request-bookmarks')
1520 1520 kwargs.setdefault('listkeys', []).append('bookmarks')
1521 1521
1522 1522 # If this is a full pull / clone and the server supports the clone bundles
1523 1523 # feature, tell the server whether we attempted a clone bundle. The
1524 1524 # presence of this flag indicates the client supports clone bundles. This
1525 1525 # will enable the server to treat clients that support clone bundles
1526 1526 # differently from those that don't.
1527 1527 if (pullop.remote.capable('clonebundles')
1528 1528 and pullop.heads is None and list(pullop.common) == [nullid]):
1529 1529 kwargs['cbattempted'] = pullop.clonebundleattempted
1530 1530
1531 1531 if streaming:
1532 1532 pullop.repo.ui.status(_('streaming all changes\n'))
1533 1533 elif not pullop.fetch:
1534 1534 pullop.repo.ui.status(_("no changes found\n"))
1535 1535 pullop.cgresult = 0
1536 1536 else:
1537 1537 if pullop.heads is None and list(pullop.common) == [nullid]:
1538 1538 pullop.repo.ui.status(_("requesting all changes\n"))
1539 1539 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1540 1540 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1541 1541 if obsolete.commonversion(remoteversions) is not None:
1542 1542 kwargs['obsmarkers'] = True
1543 1543 pullop.stepsdone.add('obsmarkers')
1544 1544 _pullbundle2extraprepare(pullop, kwargs)
1545 1545 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1546 1546 try:
1547 1547 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction)
1548 1548 op.modes['bookmarks'] = 'records'
1549 1549 bundle2.processbundle(pullop.repo, bundle, op=op)
1550 1550 except bundle2.AbortFromPart as exc:
1551 1551 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1552 1552 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1553 1553 except error.BundleValueError as exc:
1554 1554 raise error.Abort(_('missing support for %s') % exc)
1555 1555
1556 1556 if pullop.fetch:
1557 1557 pullop.cgresult = bundle2.combinechangegroupresults(op)
1558 1558
1559 1559 # processing phases change
1560 1560 for namespace, value in op.records['listkeys']:
1561 1561 if namespace == 'phases':
1562 1562 _pullapplyphases(pullop, value)
1563 1563
1564 1564 # processing bookmark update
1565 1565 if bookmarksrequested:
1566 1566 books = {}
1567 1567 for record in op.records['bookmarks']:
1568 1568 books[record['bookmark']] = record["node"]
1569 1569 pullop.remotebookmarks = books
1570 1570 else:
1571 1571 for namespace, value in op.records['listkeys']:
1572 1572 if namespace == 'bookmarks':
1573 1573 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1574 1574
1575 1575 # bookmark data were either already there or pulled in the bundle
1576 1576 if pullop.remotebookmarks is not None:
1577 1577 _pullbookmarks(pullop)
1578 1578
1579 1579 def _pullbundle2extraprepare(pullop, kwargs):
1580 1580 """hook function so that extensions can extend the getbundle call"""
1581 1581
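A sketch of how an extension typically uses this hook, wrapping it to
request an extra part; the 'x-mydata' argument is hypothetical and a
real extension would also need matching server-side support:

from mercurial import exchange, extensions

def _extraprepare(orig, pullop, kwargs):
    # ask the server for a hypothetical extra bundle2 part
    kwargs['x-mydata'] = True
    return orig(pullop, kwargs)

def extsetup(ui):
    extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
                            _extraprepare)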
1582 1582 def _pullchangeset(pullop):
1583 1583 """pull changeset from unbundle into the local repo"""
1584 1584 # We delay opening the transaction as late as possible so we
1585 1585 # don't open a transaction for nothing and don't break future useful
1586 1586 # rollback calls
1587 1587 if 'changegroup' in pullop.stepsdone:
1588 1588 return
1589 1589 pullop.stepsdone.add('changegroup')
1590 1590 if not pullop.fetch:
1591 1591 pullop.repo.ui.status(_("no changes found\n"))
1592 1592 pullop.cgresult = 0
1593 1593 return
1594 1594 tr = pullop.gettransaction()
1595 1595 if pullop.heads is None and list(pullop.common) == [nullid]:
1596 1596 pullop.repo.ui.status(_("requesting all changes\n"))
1597 1597 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1598 1598 # issue1320, avoid a race if remote changed after discovery
1599 1599 pullop.heads = pullop.rheads
1600 1600
1601 1601 if pullop.remote.capable('getbundle'):
1602 1602 # TODO: get bundlecaps from remote
1603 1603 cg = pullop.remote.getbundle('pull', common=pullop.common,
1604 1604 heads=pullop.heads or pullop.rheads)
1605 1605 elif pullop.heads is None:
1606 1606 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1607 1607 elif not pullop.remote.capable('changegroupsubset'):
1608 1608 raise error.Abort(_("partial pull cannot be done because "
1609 1609 "other repository doesn't support "
1610 1610 "changegroupsubset."))
1611 1611 else:
1612 1612 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1613 1613 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1614 1614 pullop.remote.url())
1615 1615 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1616 1616
1617 1617 def _pullphase(pullop):
1618 1618 # Get remote phases data from remote
1619 1619 if 'phases' in pullop.stepsdone:
1620 1620 return
1621 1621 remotephases = pullop.remote.listkeys('phases')
1622 1622 _pullapplyphases(pullop, remotephases)
1623 1623
1624 1624 def _pullapplyphases(pullop, remotephases):
1625 1625 """apply phase movement from observed remote state"""
1626 1626 if 'phases' in pullop.stepsdone:
1627 1627 return
1628 1628 pullop.stepsdone.add('phases')
1629 1629 publishing = bool(remotephases.get('publishing', False))
1630 1630 if remotephases and not publishing:
1631 1631 # remote is new and non-publishing
1632 1632 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1633 1633 pullop.pulledsubset,
1634 1634 remotephases)
1635 1635 dheads = pullop.pulledsubset
1636 1636 else:
1637 1637 # Remote is old or publishing; all common changesets
1638 1638 # should be seen as public
1639 1639 pheads = pullop.pulledsubset
1640 1640 dheads = []
1641 1641 unfi = pullop.repo.unfiltered()
1642 1642 phase = unfi._phasecache.phase
1643 1643 rev = unfi.changelog.nodemap.get
1644 1644 public = phases.public
1645 1645 draft = phases.draft
1646 1646
1647 1647 # exclude changesets already public locally and update the others
1648 1648 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1649 1649 if pheads:
1650 1650 tr = pullop.gettransaction()
1651 1651 phases.advanceboundary(pullop.repo, tr, public, pheads)
1652 1652
1653 1653 # exclude changesets already draft locally and update the others
1654 1654 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1655 1655 if dheads:
1656 1656 tr = pullop.gettransaction()
1657 1657 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1658 1658
1659 1659 def _pullbookmarks(pullop):
1660 1660 """process the remote bookmark information to update the local one"""
1661 1661 if 'bookmarks' in pullop.stepsdone:
1662 1662 return
1663 1663 pullop.stepsdone.add('bookmarks')
1664 1664 repo = pullop.repo
1665 1665 remotebookmarks = pullop.remotebookmarks
1666 1666 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1667 1667 pullop.remote.url(),
1668 1668 pullop.gettransaction,
1669 1669 explicit=pullop.explicitbookmarks)
1670 1670
1671 1671 def _pullobsolete(pullop):
1672 1672 """utility function to pull obsolete markers from a remote
1673 1673
1674 1674 `gettransaction` is a function that returns the pull transaction, creating
1675 1675 one if necessary. We return the transaction to inform the calling code that
1676 1676 a new transaction has been created (when applicable).
1677 1677
1678 1678 Exists mostly to allow overriding for experimentation purposes"""
1679 1679 if 'obsmarkers' in pullop.stepsdone:
1680 1680 return
1681 1681 pullop.stepsdone.add('obsmarkers')
1682 1682 tr = None
1683 1683 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1684 1684 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1685 1685 remoteobs = pullop.remote.listkeys('obsolete')
1686 1686 if 'dump0' in remoteobs:
1687 1687 tr = pullop.gettransaction()
1688 1688 markers = []
1689 1689 for key in sorted(remoteobs, reverse=True):
1690 1690 if key.startswith('dump'):
1691 1691 data = util.b85decode(remoteobs[key])
1692 1692 version, newmarks = obsolete._readmarkers(data)
1693 1693 markers += newmarks
1694 1694 if markers:
1695 1695 pullop.repo.obsstore.add(tr, markers)
1696 1696 pullop.repo.invalidatevolatilesets()
1697 1697 return tr
1698 1698
1699 1699 def caps20to10(repo, role):
1700 1700 """return a set with appropriate options to use bundle20 during getbundle"""
1701 1701 caps = {'HG20'}
1702 1702 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
1703 1703 caps.add('bundle2=' + urlreq.quote(capsblob))
1704 1704 return caps
1705 1705
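An abbreviated sketch of the resulting encoding; the capability blob
below is made up and much shorter than a real one:

from mercurial import util

urlreq = util.urlreq
capsblob = 'HG20\nchangegroup=01,02'
caps = {'HG20', 'bundle2=' + urlreq.quote(capsblob)}
# caps == {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'}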
1706 1706 # List of step names used to build a bundle2 for getbundle; order matters.
1707 1707 getbundle2partsorder = []
1708 1708
1709 1709 # Mapping between step name and function
1710 1710 #
1711 1711 # This exists to help extensions wrap steps if necessary
1712 1712 getbundle2partsmapping = {}
1713 1713
1714 1714 def getbundle2partsgenerator(stepname, idx=None):
1715 1715 """decorator for function generating bundle2 part for getbundle
1716 1716
1717 1717 The function is added to the step -> function mapping and appended to the
1718 1718 list of steps. Beware that decorated functions will be added in order
1719 1719 (this may matter).
1720 1720
1721 1721 You can only use this decorator for new steps; if you want to wrap a step
1722 1722 from an extension, modify the getbundle2partsmapping dictionary directly."""
1723 1723 def dec(func):
1724 1724 assert stepname not in getbundle2partsmapping
1725 1725 getbundle2partsmapping[stepname] = func
1726 1726 if idx is None:
1727 1727 getbundle2partsorder.append(stepname)
1728 1728 else:
1729 1729 getbundle2partsorder.insert(idx, stepname)
1730 1730 return func
1731 1731 return dec
1732 1732
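A hypothetical extension registering a brand new part generator and
forcing it to run first with idx=0; the part name is made up:

from mercurial import exchange

@exchange.getbundle2partsgenerator('x-mypart', idx=0)
def _getbundlemypart(bundler, repo, source, bundlecaps=None,
                     b2caps=None, **kwargs):
    # only emit the (made-up) part when the client advertised support
    if b2caps and 'x-mypart' in b2caps:
        bundler.newpart('x-mypart', data='hello', mandatory=False)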
1733 1733 def bundle2requested(bundlecaps):
1734 1734 if bundlecaps is not None:
1735 1735 return any(cap.startswith('HG2') for cap in bundlecaps)
1736 1736 return False
1737 1737
1738 1738 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1739 1739 **kwargs):
1740 1740 """Return chunks constituting a bundle's raw data.
1741 1741
1742 1742 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1743 1743 passed.
1744 1744
1745 1745 Returns a 2-tuple of a dict with metadata about the generated bundle
1746 1746 and an iterator over raw chunks (of varying sizes).
1747 1747 """
1748 1748 kwargs = pycompat.byteskwargs(kwargs)
1749 1749 info = {}
1750 1750 usebundle2 = bundle2requested(bundlecaps)
1751 1751 # bundle10 case
1752 1752 if not usebundle2:
1753 1753 if bundlecaps and not kwargs.get('cg', True):
1754 1754 raise ValueError(_('request for bundle10 must include changegroup'))
1755 1755
1756 1756 if kwargs:
1757 1757 raise ValueError(_('unsupported getbundle arguments: %s')
1758 1758 % ', '.join(sorted(kwargs.keys())))
1759 1759 outgoing = _computeoutgoing(repo, heads, common)
1760 1760 info['bundleversion'] = 1
1761 1761 return info, changegroup.makestream(repo, outgoing, '01', source,
1762 1762 bundlecaps=bundlecaps)
1763 1763
1764 1764 # bundle20 case
1765 1765 info['bundleversion'] = 2
1766 1766 b2caps = {}
1767 1767 for bcaps in bundlecaps:
1768 1768 if bcaps.startswith('bundle2='):
1769 1769 blob = urlreq.unquote(bcaps[len('bundle2='):])
1770 1770 b2caps.update(bundle2.decodecaps(blob))
1771 1771 bundler = bundle2.bundle20(repo.ui, b2caps)
1772 1772
1773 1773 kwargs['heads'] = heads
1774 1774 kwargs['common'] = common
1775 1775
1776 1776 for name in getbundle2partsorder:
1777 1777 func = getbundle2partsmapping[name]
1778 1778 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1779 1779 **pycompat.strkwargs(kwargs))
1780 1780
1781 1781 info['prefercompressed'] = bundler.prefercompressed
1782 1782
1783 1783 return info, bundler.getchunks()
1784 1784
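A minimal consumer sketch, assuming a full pull relative to the null
revision; the helper name is hypothetical:

from mercurial import exchange
from mercurial.node import nullid

def writefullbundle(repo, fh):
    info, chunks = exchange.getbundlechunks(repo, 'bundle',
                                            heads=repo.heads(),
                                            common=[nullid])
    for chunk in chunks:
        fh.write(chunk)  # chunks are raw bytes of varying sizes
    return info['bundleversion']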
1785 1785 @getbundle2partsgenerator('stream2')
1786 1786 def _getbundlestream2(bundler, repo, source, bundlecaps=None,
1787 1787 b2caps=None, heads=None, common=None, **kwargs):
1788 1788 if not kwargs.get('stream', False):
1789 1789 return
1790 1790
1791 1791 if not streamclone.allowservergeneration(repo):
1792 1792 raise error.Abort(_('stream data requested but server does not allow '
1793 1793 'this feature'),
1794 1794 hint=_('well-behaved clients should not be '
1795 1795 'requesting stream data from servers not '
1796 1796 'advertising it; the client may be buggy'))
1797 1797
1798 1798 # Stream clones don't compress well. And compression undermines a
1799 1799 # goal of stream clones, which is to be fast. Communicate the desire
1800 1800 # to avoid compression to consumers of the bundle.
1801 1801 bundler.prefercompressed = False
1802 1802
1803 1803 filecount, bytecount, it = streamclone.generatev2(repo)
1804 1804 requirements = _formatrequirementsspec(repo.requirements)
1805 1805 part = bundler.newpart('stream2', data=it)
1806 1806 part.addparam('bytecount', '%d' % bytecount, mandatory=True)
1807 1807 part.addparam('filecount', '%d' % filecount, mandatory=True)
1808 1808 part.addparam('requirements', requirements, mandatory=True)
1809 1809
1810 1810 @getbundle2partsgenerator('changegroup')
1811 1811 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1812 1812 b2caps=None, heads=None, common=None, **kwargs):
1813 1813 """add a changegroup part to the requested bundle"""
1814 1814 cgstream = None
1815 1815 if kwargs.get(r'cg', True):
1816 1816 # build changegroup bundle here.
1817 1817 version = '01'
1818 1818 cgversions = b2caps.get('changegroup')
1819 1819 if cgversions: # 3.1 and 3.2 ship with an empty value
1820 1820 cgversions = [v for v in cgversions
1821 1821 if v in changegroup.supportedoutgoingversions(repo)]
1822 1822 if not cgversions:
1823 1823 raise ValueError(_('no common changegroup version'))
1824 1824 version = max(cgversions)
1825 1825 outgoing = _computeoutgoing(repo, heads, common)
1826 1826 if outgoing.missing:
1827 1827 cgstream = changegroup.makestream(repo, outgoing, version, source,
1828 1828 bundlecaps=bundlecaps)
1829 1829
1830 1830 if cgstream:
1831 1831 part = bundler.newpart('changegroup', data=cgstream)
1832 1832 if cgversions:
1833 1833 part.addparam('version', version)
1834 1834 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1835 1835 mandatory=False)
1836 1836 if 'treemanifest' in repo.requirements:
1837 1837 part.addparam('treemanifest', '1')
1838 1838
1839 1839 @getbundle2partsgenerator('bookmarks')
1840 1840 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1841 1841 b2caps=None, **kwargs):
1842 1842 """add a bookmark part to the requested bundle"""
1843 1843 if not kwargs.get(r'bookmarks', False):
1844 1844 return
1845 1845 if 'bookmarks' not in b2caps:
1846 1846 raise ValueError(_('no common bookmarks exchange method'))
1847 1847 books = bookmod.listbinbookmarks(repo)
1848 1848 data = bookmod.binaryencode(books)
1849 1849 if data:
1850 1850 bundler.newpart('bookmarks', data=data)
1851 1851
1852 1852 @getbundle2partsgenerator('listkeys')
1853 1853 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1854 1854 b2caps=None, **kwargs):
1855 1855 """add parts containing listkeys namespaces to the requested bundle"""
1856 1856 listkeys = kwargs.get(r'listkeys', ())
1857 1857 for namespace in listkeys:
1858 1858 part = bundler.newpart('listkeys')
1859 1859 part.addparam('namespace', namespace)
1860 1860 keys = repo.listkeys(namespace).items()
1861 1861 part.data = pushkey.encodekeys(keys)
1862 1862
1863 1863 @getbundle2partsgenerator('obsmarkers')
1864 1864 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1865 1865 b2caps=None, heads=None, **kwargs):
1866 1866 """add an obsolescence markers part to the requested bundle"""
1867 1867 if kwargs.get(r'obsmarkers', False):
1868 1868 if heads is None:
1869 1869 heads = repo.heads()
1870 1870 subset = [c.node() for c in repo.set('::%ln', heads)]
1871 1871 markers = repo.obsstore.relevantmarkers(subset)
1872 1872 markers = sorted(markers)
1873 1873 bundle2.buildobsmarkerspart(bundler, markers)
1874 1874
1875 1875 @getbundle2partsgenerator('phases')
1876 1876 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1877 1877 b2caps=None, heads=None, **kwargs):
1878 1878 """add phase heads part to the requested bundle"""
1879 1879 if kwargs.get(r'phases', False):
1880 1880 if 'heads' not in b2caps.get('phases', ()):
1881 1881 raise ValueError(_('no common phases exchange method'))
1882 1882 if heads is None:
1883 1883 heads = repo.heads()
1884 1884
1885 1885 headsbyphase = collections.defaultdict(set)
1886 1886 if repo.publishing():
1887 1887 headsbyphase[phases.public] = heads
1888 1888 else:
1889 1889 # find the appropriate heads to move
1890 1890
1891 1891 phase = repo._phasecache.phase
1892 1892 node = repo.changelog.node
1893 1893 rev = repo.changelog.rev
1894 1894 for h in heads:
1895 1895 headsbyphase[phase(repo, rev(h))].add(h)
1896 1896 seenphases = list(headsbyphase.keys())
1897 1897
1898 1898 # We do not handle anything but public and draft phases for now
1899 1899 if seenphases:
1900 1900 assert max(seenphases) <= phases.draft
1901 1901
1902 1902 # if client is pulling non-public changesets, we need to find
1903 1903 # intermediate public heads.
1904 1904 draftheads = headsbyphase.get(phases.draft, set())
1905 1905 if draftheads:
1906 1906 publicheads = headsbyphase.get(phases.public, set())
1907 1907
1908 1908 revset = 'heads(only(%ln, %ln) and public())'
1909 1909 extraheads = repo.revs(revset, draftheads, publicheads)
1910 1910 for r in extraheads:
1911 1911 headsbyphase[phases.public].add(node(r))
1912 1912
1913 1913 # transform data into the format used by the encoding function
1914 1914 phasemapping = []
1915 1915 for phase in phases.allphases:
1916 1916 phasemapping.append(sorted(headsbyphase[phase]))
1917 1917
1918 1918 # generate the actual part
1919 1919 phasedata = phases.binaryencode(phasemapping)
1920 1920 bundler.newpart('phase-heads', data=phasedata)
1921 1921
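# Worked example (illustrative) of the extraheads computation above: in
# a linear graph P0 -> P1 -> D where P0 and P1 are public and the draft
# changeset D is the only repo head, publicheads is empty, so the revset
# reduces to heads(::D and public()) and yields P1. A client pulling D
# is therefore also told that everything up to P1 is public.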
1922 1922 @getbundle2partsgenerator('hgtagsfnodes')
1923 1923 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1924 1924 b2caps=None, heads=None, common=None,
1925 1925 **kwargs):
1926 1926 """Transfer the .hgtags filenodes mapping.
1927 1927
1928 1928 Only values for heads in this bundle will be transferred.
1929 1929
1930 1930 The part data consists of pairs of a 20-byte changeset node and the raw
1931 1931 .hgtags filenode value.
1932 1932 """
1933 1933 # Don't send unless:
1934 1934 # - changesets are being exchanged,
1935 1935 # - the client supports it.
1936 1936 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
1937 1937 return
1938 1938
1939 1939 outgoing = _computeoutgoing(repo, heads, common)
1940 1940 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1941 1941
1942 @getbundle2partsgenerator('cache:rev-branch-cache')
1943 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
1944 b2caps=None, heads=None, common=None,
1945 **kwargs):
1946 """Transfer the rev-branch-cache mapping
1947
1948 The payload is a series of records, one per branch, each consisting of:
1949
1950 1) branch name length
1951 2) number of open heads
1952 3) number of closed heads
1953 4) open heads nodes
1954 5) closed heads nodes
1955 """
1956 # Don't send unless:
1957 # - changesets are being exchanged,
1958 # - the client supports it.
1959 if not kwargs.get(r'cg', True) or 'rev-branch-cache' not in b2caps:
1960 return
1961 outgoing = _computeoutgoing(repo, heads, common)
1962 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
1963
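A minimal sketch of a decoder for this payload, assuming each record is
a big-endian header of three uint32s (name length, open count, closed
count) followed by the branch name and raw 20-byte nodes; the canonical
encoder lives in bundle2.addpartrevbranchcache and the names below are
hypothetical:

import struct

_rbcheader = struct.Struct('>III')

def readrbcpayload(data):
    branches = {}
    off = 0
    while off < len(data):
        namelen, nbopen, nbclosed = _rbcheader.unpack_from(data, off)
        off += _rbcheader.size
        branch = data[off:off + namelen]
        off += namelen
        opennodes = data[off:off + 20 * nbopen]
        off += 20 * nbopen
        closednodes = data[off:off + 20 * nbclosed]
        off += 20 * nbclosed
        branches[branch] = (opennodes, closednodes)
    return branches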
1942 1964 def check_heads(repo, their_heads, context):
1943 1965 """check if the heads of a repo have been modified
1944 1966
1945 1967 Used by peer for unbundling.
1946 1968 """
1947 1969 heads = repo.heads()
1948 1970 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1949 1971 if not (their_heads == ['force'] or their_heads == heads or
1950 1972 their_heads == ['hashed', heads_hash]):
1951 1973 # someone else committed/pushed/unbundled while we
1952 1974 # were transferring data
1953 1975 raise error.PushRaced('repository changed while %s - '
1954 1976 'please try again' % context)
1955 1977
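For reference, a pushing client can send ['hashed', <digest>] instead
of the full head list; a sketch of the matching computation, mirroring
the hash above (the helper name is hypothetical):

import hashlib

def hashheads(heads):
    # heads: list of 20-byte binary nodes, e.g. from repo.heads()
    return ['hashed', hashlib.sha1(''.join(sorted(heads))).digest()]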
1956 1978 def unbundle(repo, cg, heads, source, url):
1957 1979 """Apply a bundle to a repo.
1958 1980
1959 1981 This function makes sure the repo is locked during the application and has
1960 1982 a mechanism to check that no push race occurred between the creation of the
1961 1983 bundle and its application.
1962 1984
1963 1985 If the push was raced, a PushRaced exception is raised."""
1964 1986 r = 0
1965 1987 # need a transaction when processing a bundle2 stream
1966 1988 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1967 1989 lockandtr = [None, None, None]
1968 1990 recordout = None
1969 1991 # quick fix for output mismatch with bundle2 in 3.4
1970 1992 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
1971 1993 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1972 1994 captureoutput = True
1973 1995 try:
1974 1996 # note: outside bundle1, 'heads' is expected to be empty and this
1975 1997 # 'check_heads' call will be a no-op
1976 1998 check_heads(repo, heads, 'uploading changes')
1977 1999 # push can proceed
1978 2000 if not isinstance(cg, bundle2.unbundle20):
1979 2001 # legacy case: bundle1 (changegroup 01)
1980 2002 txnname = "\n".join([source, util.hidepassword(url)])
1981 2003 with repo.lock(), repo.transaction(txnname) as tr:
1982 2004 op = bundle2.applybundle(repo, cg, tr, source, url)
1983 2005 r = bundle2.combinechangegroupresults(op)
1984 2006 else:
1985 2007 r = None
1986 2008 try:
1987 2009 def gettransaction():
1988 2010 if not lockandtr[2]:
1989 2011 lockandtr[0] = repo.wlock()
1990 2012 lockandtr[1] = repo.lock()
1991 2013 lockandtr[2] = repo.transaction(source)
1992 2014 lockandtr[2].hookargs['source'] = source
1993 2015 lockandtr[2].hookargs['url'] = url
1994 2016 lockandtr[2].hookargs['bundle2'] = '1'
1995 2017 return lockandtr[2]
1996 2018
1997 2019 # Do greedy locking by default until we're satisfied with lazy
1998 2020 # locking.
1999 2021 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2000 2022 gettransaction()
2001 2023
2002 2024 op = bundle2.bundleoperation(repo, gettransaction,
2003 2025 captureoutput=captureoutput)
2004 2026 try:
2005 2027 op = bundle2.processbundle(repo, cg, op=op)
2006 2028 finally:
2007 2029 r = op.reply
2008 2030 if captureoutput and r is not None:
2009 2031 repo.ui.pushbuffer(error=True, subproc=True)
2010 2032 def recordout(output):
2011 2033 r.newpart('output', data=output, mandatory=False)
2012 2034 if lockandtr[2] is not None:
2013 2035 lockandtr[2].close()
2014 2036 except BaseException as exc:
2015 2037 exc.duringunbundle2 = True
2016 2038 if captureoutput and r is not None:
2017 2039 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2018 2040 def recordout(output):
2019 2041 part = bundle2.bundlepart('output', data=output,
2020 2042 mandatory=False)
2021 2043 parts.append(part)
2022 2044 raise
2023 2045 finally:
2024 2046 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2025 2047 if recordout is not None:
2026 2048 recordout(repo.ui.popbuffer())
2027 2049 return r
2028 2050
2029 2051 def _maybeapplyclonebundle(pullop):
2030 2052 """Apply a clone bundle from a remote, if possible."""
2031 2053
2032 2054 repo = pullop.repo
2033 2055 remote = pullop.remote
2034 2056
2035 2057 if not repo.ui.configbool('ui', 'clonebundles'):
2036 2058 return
2037 2059
2038 2060 # Only run if local repo is empty.
2039 2061 if len(repo):
2040 2062 return
2041 2063
2042 2064 if pullop.heads:
2043 2065 return
2044 2066
2045 2067 if not remote.capable('clonebundles'):
2046 2068 return
2047 2069
2048 2070 res = remote._call('clonebundles')
2049 2071
2050 2072 # If we call the wire protocol command, that's good enough to record the
2051 2073 # attempt.
2052 2074 pullop.clonebundleattempted = True
2053 2075
2054 2076 entries = parseclonebundlesmanifest(repo, res)
2055 2077 if not entries:
2056 2078 repo.ui.note(_('no clone bundles available on remote; '
2057 2079 'falling back to regular clone\n'))
2058 2080 return
2059 2081
2060 2082 entries = filterclonebundleentries(
2061 2083 repo, entries, streamclonerequested=pullop.streamclonerequested)
2062 2084
2063 2085 if not entries:
2064 2086 # There is a thundering herd concern here. However, if a server
2065 2087 # operator doesn't advertise bundles appropriate for its clients,
2066 2088 # they deserve what's coming. Furthermore, from a client's
2067 2089 # perspective, no automatic fallback would mean not being able to
2068 2090 # clone!
2069 2091 repo.ui.warn(_('no compatible clone bundles available on server; '
2070 2092 'falling back to regular clone\n'))
2071 2093 repo.ui.warn(_('(you may want to report this to the server '
2072 2094 'operator)\n'))
2073 2095 return
2074 2096
2075 2097 entries = sortclonebundleentries(repo.ui, entries)
2076 2098
2077 2099 url = entries[0]['URL']
2078 2100 repo.ui.status(_('applying clone bundle from %s\n') % url)
2079 2101 if trypullbundlefromurl(repo.ui, repo, url):
2080 2102 repo.ui.status(_('finished applying clone bundle\n'))
2081 2103 # Bundle failed.
2082 2104 #
2083 2105 # We abort by default to avoid the thundering herd of
2084 2106 # clients flooding a server that was expecting expensive
2085 2107 # clone load to be offloaded.
2086 2108 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2087 2109 repo.ui.warn(_('falling back to normal clone\n'))
2088 2110 else:
2089 2111 raise error.Abort(_('error applying bundle'),
2090 2112 hint=_('if this error persists, consider contacting '
2091 2113 'the server operator or disable clone '
2092 2114 'bundles via '
2093 2115 '"--config ui.clonebundles=false"'))
2094 2116
2095 2117 def parseclonebundlesmanifest(repo, s):
2096 2118 """Parses the raw text of a clone bundles manifest.
2097 2119
2098 2120 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2099 2121 to the URL and other keys are the attributes for the entry.
2100 2122 """
2101 2123 m = []
2102 2124 for line in s.splitlines():
2103 2125 fields = line.split()
2104 2126 if not fields:
2105 2127 continue
2106 2128 attrs = {'URL': fields[0]}
2107 2129 for rawattr in fields[1:]:
2108 2130 key, value = rawattr.split('=', 1)
2109 2131 key = urlreq.unquote(key)
2110 2132 value = urlreq.unquote(value)
2111 2133 attrs[key] = value
2112 2134
2113 2135 # Parse BUNDLESPEC into components. This makes client-side
2114 2136 # preferences easier to specify since you can prefer a single
2115 2137 # component of the BUNDLESPEC.
2116 2138 if key == 'BUNDLESPEC':
2117 2139 try:
2118 2140 comp, version, params = parsebundlespec(repo, value,
2119 2141 externalnames=True)
2120 2142 attrs['COMPRESSION'] = comp
2121 2143 attrs['VERSION'] = version
2122 2144 except error.InvalidBundleSpecification:
2123 2145 pass
2124 2146 except error.UnsupportedBundleSpecification:
2125 2147 pass
2126 2148
2127 2149 m.append(attrs)
2128 2150
2129 2151 return m
2130 2152
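A made-up two-entry manifest of the kind served by the clonebundles
wire command, with the entries this function would produce sketched in
the comment:

sample = ('https://example.com/full.hg BUNDLESPEC=gzip-v2\n'
          'https://example.com/stream.hg BUNDLESPEC=none-packed1\n')
# parseclonebundlesmanifest(repo, sample) would return entries like:
# [{'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#   'COMPRESSION': 'gzip', 'VERSION': 'v2'}, ...]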
2131 2153 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2132 2154 """Remove incompatible clone bundle manifest entries.
2133 2155
2134 2156 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2135 2157 and returns a new list consisting of only the entries that this client
2136 2158 should be able to apply.
2137 2159
2138 2160 There is no guarantee we'll be able to apply all returned entries because
2139 2161 the metadata we use to filter on may be missing or wrong.
2140 2162 """
2141 2163 newentries = []
2142 2164 for entry in entries:
2143 2165 spec = entry.get('BUNDLESPEC')
2144 2166 if spec:
2145 2167 try:
2146 2168 comp, version, params = parsebundlespec(repo, spec, strict=True)
2147 2169
2148 2170 # If a stream clone was requested, filter out non-streamclone
2149 2171 # entries.
2150 2172 if streamclonerequested and (comp != 'UN' or version != 's1'):
2151 2173 repo.ui.debug('filtering %s because not a stream clone\n' %
2152 2174 entry['URL'])
2153 2175 continue
2154 2176
2155 2177 except error.InvalidBundleSpecification as e:
2156 2178 repo.ui.debug(str(e) + '\n')
2157 2179 continue
2158 2180 except error.UnsupportedBundleSpecification as e:
2159 2181 repo.ui.debug('filtering %s because unsupported bundle '
2160 2182 'spec: %s\n' % (
2161 2183 entry['URL'], util.forcebytestr(e)))
2162 2184 continue
2163 2185 # If we don't have a spec and requested a stream clone, we don't know
2164 2186 # what the entry is so don't attempt to apply it.
2165 2187 elif streamclonerequested:
2166 2188 repo.ui.debug('filtering %s because cannot determine if a stream '
2167 2189 'clone bundle\n' % entry['URL'])
2168 2190 continue
2169 2191
2170 2192 if 'REQUIRESNI' in entry and not sslutil.hassni:
2171 2193 repo.ui.debug('filtering %s because SNI not supported\n' %
2172 2194 entry['URL'])
2173 2195 continue
2174 2196
2175 2197 newentries.append(entry)
2176 2198
2177 2199 return newentries
2178 2200
2179 2201 class clonebundleentry(object):
2180 2202 """Represents an item in a clone bundles manifest.
2181 2203
2182 2204 This rich class is needed to support sorting since sorted() in Python 3
2183 2205 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2184 2206 won't work.
2185 2207 """
2186 2208
2187 2209 def __init__(self, value, prefers):
2188 2210 self.value = value
2189 2211 self.prefers = prefers
2190 2212
2191 2213 def _cmp(self, other):
2192 2214 for prefkey, prefvalue in self.prefers:
2193 2215 avalue = self.value.get(prefkey)
2194 2216 bvalue = other.value.get(prefkey)
2195 2217
2196 2218 # Special case: b is missing the attribute and a matches exactly.
2197 2219 if avalue is not None and bvalue is None and avalue == prefvalue:
2198 2220 return -1
2199 2221
2200 2222 # Special case: a is missing the attribute and b matches exactly.
2201 2223 if bvalue is not None and avalue is None and bvalue == prefvalue:
2202 2224 return 1
2203 2225
2204 2226 # We can't compare unless the attribute is present on both.
2205 2227 if avalue is None or bvalue is None:
2206 2228 continue
2207 2229
2208 2230 # Same values should fall back to next attribute.
2209 2231 if avalue == bvalue:
2210 2232 continue
2211 2233
2212 2234 # Exact matches come first.
2213 2235 if avalue == prefvalue:
2214 2236 return -1
2215 2237 if bvalue == prefvalue:
2216 2238 return 1
2217 2239
2218 2240 # Fall back to next attribute.
2219 2241 continue
2220 2242
2221 2243 # If we got here we couldn't sort by attributes and prefers. Fall
2222 2244 # back to index order.
2223 2245 return 0
2224 2246
2225 2247 def __lt__(self, other):
2226 2248 return self._cmp(other) < 0
2227 2249
2228 2250 def __gt__(self, other):
2229 2251 return self._cmp(other) > 0
2230 2252
2231 2253 def __eq__(self, other):
2232 2254 return self._cmp(other) == 0
2233 2255
2234 2256 def __le__(self, other):
2235 2257 return self._cmp(other) <= 0
2236 2258
2237 2259 def __ge__(self, other):
2238 2260 return self._cmp(other) >= 0
2239 2261
2240 2262 def __ne__(self, other):
2241 2263 return self._cmp(other) != 0
2242 2264
2243 2265 def sortclonebundleentries(ui, entries):
2244 2266 prefers = ui.configlist('ui', 'clonebundleprefers')
2245 2267 if not prefers:
2246 2268 return list(entries)
2247 2269
2248 2270 prefers = [p.split('=', 1) for p in prefers]
2249 2271
2250 2272 items = sorted(clonebundleentry(v, prefers) for v in entries)
2251 2273 return [i.value for i in items]
2252 2274
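A small sketch of the preference mechanism, configuring
ui.clonebundleprefers and sorting two made-up entries:

from mercurial import exchange
from mercurial import ui as uimod

u = uimod.ui.load()
u.setconfig('ui', 'clonebundleprefers', 'VERSION=v2, COMPRESSION=gzip')
entries = [{'URL': 'bz', 'BUNDLESPEC': 'bzip2-v1', 'VERSION': 'v1'},
           {'URL': 'gz', 'BUNDLESPEC': 'gzip-v2', 'VERSION': 'v2'}]
assert exchange.sortclonebundleentries(u, entries)[0]['URL'] == 'gz'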
2253 2275 def trypullbundlefromurl(ui, repo, url):
2254 2276 """Attempt to apply a bundle from a URL."""
2255 2277 with repo.lock(), repo.transaction('bundleurl') as tr:
2256 2278 try:
2257 2279 fh = urlmod.open(ui, url)
2258 2280 cg = readbundle(ui, fh, 'stream')
2259 2281
2260 2282 if isinstance(cg, streamclone.streamcloneapplier):
2261 2283 cg.apply(repo)
2262 2284 else:
2263 2285 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2264 2286 return True
2265 2287 except urlerr.httperror as e:
2266 2288 ui.warn(_('HTTP error fetching bundle: %s\n') %
2267 2289 util.forcebytestr(e))
2268 2290 except urlerr.urlerror as e:
2269 2291 ui.warn(_('error fetching bundle: %s\n') %
2270 2292 util.forcebytestr(e.reason))
2271 2293
2272 2294 return False