bundlespec: add support for some variants...
Boris Feld
r37185:a2b350d9 default
@@ -1,2310 +1,2326 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 nullid,
19 19 )
20 20 from .thirdparty import (
21 21 attr,
22 22 )
23 23 from . import (
24 24 bookmarks as bookmod,
25 25 bundle2,
26 26 changegroup,
27 27 discovery,
28 28 error,
29 29 lock as lockmod,
30 30 logexchange,
31 31 obsolete,
32 32 phases,
33 33 pushkey,
34 34 pycompat,
35 35 scmutil,
36 36 sslutil,
37 37 streamclone,
38 38 url as urlmod,
39 39 util,
40 40 )
41 41 from .utils import (
42 42 stringutil,
43 43 )
44 44
45 45 urlerr = util.urlerr
46 46 urlreq = util.urlreq
47 47
48 48 # Maps bundle version human names to changegroup versions.
49 49 _bundlespeccgversions = {'v1': '01',
50 50 'v2': '02',
51 51 'packed1': 's1',
52 52 'bundle2': '02', #legacy
53 53 }
54 54
55 55 # Maps bundle version to content options, used to choose which parts to bundle
56 56 _bundlespeccontentopts = {
57 57 'v1': {
58 58 'changegroup': True,
59 59 'cg.version': '01',
60 60 'obsolescence': False,
61 61 'phases': False,
62 62 'tagsfnodescache': False,
63 63 'revbranchcache': False
64 64 },
65 65 'v2': {
66 66 'changegroup': True,
67 67 'cg.version': '02',
68 68 'obsolescence': False,
69 69 'phases': False,
70 70 'tagsfnodescache': True,
71 71 'revbranchcache': True
72 72 },
73 73 'packed1' : {
74 74 'cg.version': 's1'
75 75 }
76 76 }
77 77 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
78 78
79 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
80 "tagsfnodescache": False,
81 "revbranchcache": False}}
82
79 83 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
80 84 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
81 85
82 86 @attr.s
83 87 class bundlespec(object):
84 88 compression = attr.ib()
85 89 version = attr.ib()
86 90 params = attr.ib()
87 91 contentopts = attr.ib()
88 92
89 93 def parsebundlespec(repo, spec, strict=True, externalnames=False):
90 94 """Parse a bundle string specification into parts.
91 95
92 96 Bundle specifications denote a well-defined bundle/exchange format.
93 97 The content of a given specification should not change over time in
94 98 order to ensure that bundles produced by a newer version of Mercurial are
95 99 readable from an older version.
96 100
97 101 The string currently has the form:
98 102
99 103 <compression>-<type>[;<parameter0>[;<parameter1>]]
100 104
101 105 Where <compression> is one of the supported compression formats
102 106 and <type> is (currently) a version string. A ";" can follow the type and
103 107 all text afterwards is interpreted as URI encoded, ";" delimited key=value
104 108 pairs.
105 109
106 110 If ``strict`` is True (the default) <compression> is required. Otherwise,
107 111 it is optional.
108 112
109 113 If ``externalnames`` is False (the default), the human-centric names will
110 114 be converted to their internal representation.
111 115
112 116 Returns a bundlespec object of (compression, version, parameters, contentopts).
113 117 Compression will be ``None`` if not in strict mode and a compression isn't
114 118 defined.
115 119
116 120 An ``InvalidBundleSpecification`` is raised when the specification is
117 121 not syntactically well formed.
118 122
119 123 An ``UnsupportedBundleSpecification`` is raised when the compression or
120 124 bundle type/version is not recognized.
121 125
122 126 Note: this function will likely eventually return a more complex data
123 127 structure, including bundle2 part information.
124 128 """
125 129 def parseparams(s):
126 130 if ';' not in s:
127 131 return s, {}
128 132
129 133 params = {}
130 134 version, paramstr = s.split(';', 1)
131 135
132 136 for p in paramstr.split(';'):
133 137 if '=' not in p:
134 138 raise error.InvalidBundleSpecification(
135 139 _('invalid bundle specification: '
136 140 'missing "=" in parameter: %s') % p)
137 141
138 142 key, value = p.split('=', 1)
139 143 key = urlreq.unquote(key)
140 144 value = urlreq.unquote(value)
141 145 params[key] = value
142 146
143 147 return version, params
144 148
145 149
146 150 if strict and '-' not in spec:
147 151 raise error.InvalidBundleSpecification(
148 152 _('invalid bundle specification; '
149 153 'must be prefixed with compression: %s') % spec)
150 154
151 155 if '-' in spec:
152 156 compression, version = spec.split('-', 1)
153 157
154 158 if compression not in util.compengines.supportedbundlenames:
155 159 raise error.UnsupportedBundleSpecification(
156 160 _('%s compression is not supported') % compression)
157 161
158 162 version, params = parseparams(version)
159 163
160 164 if version not in _bundlespeccgversions:
161 165 raise error.UnsupportedBundleSpecification(
162 166 _('%s is not a recognized bundle version') % version)
163 167 else:
164 168 # Value could be just the compression or just the version, in which
165 169 # case some defaults are assumed (but only when not in strict mode).
166 170 assert not strict
167 171
168 172 spec, params = parseparams(spec)
169 173
170 174 if spec in util.compengines.supportedbundlenames:
171 175 compression = spec
172 176 version = 'v1'
173 177 # Generaldelta repos require v2.
174 178 if 'generaldelta' in repo.requirements:
175 179 version = 'v2'
176 180 # Modern compression engines require v2.
177 181 if compression not in _bundlespecv1compengines:
178 182 version = 'v2'
179 183 elif spec in _bundlespeccgversions:
180 184 if spec == 'packed1':
181 185 compression = 'none'
182 186 else:
183 187 compression = 'bzip2'
184 188 version = spec
185 189 else:
186 190 raise error.UnsupportedBundleSpecification(
187 191 _('%s is not a recognized bundle specification') % spec)
188 192
189 193 # Bundle version 1 only supports a known set of compression engines.
190 194 if version == 'v1' and compression not in _bundlespecv1compengines:
191 195 raise error.UnsupportedBundleSpecification(
192 196 _('compression engine %s is not supported on v1 bundles') %
193 197 compression)
194 198
195 199 # The specification for packed1 can optionally declare the data formats
196 200 # required to apply it. If we see this metadata, compare against what the
197 201 # repo supports and error if the bundle isn't compatible.
198 202 if version == 'packed1' and 'requirements' in params:
199 203 requirements = set(params['requirements'].split(','))
200 204 missingreqs = requirements - repo.supportedformats
201 205 if missingreqs:
202 206 raise error.UnsupportedBundleSpecification(
203 207 _('missing support for repository features: %s') %
204 208 ', '.join(sorted(missingreqs)))
205 209
206 210 # Compute contentopts based on the version
207 211 contentopts = _bundlespeccontentopts.get(version, {}).copy()
208 212
213 # Process the variants
214 if "stream" in params and params["stream"] == "v2":
215 variant = _bundlespecvariants["streamv2"]
216 contentopts.update(variant)
217
209 218 if not externalnames:
210 219 engine = util.compengines.forbundlename(compression)
211 220 compression = engine.bundletype()[1]
212 221 version = _bundlespeccgversions[version]
213 222
214 223 return bundlespec(compression, version, params, contentopts)
215 224
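
To make the new variant handling concrete, here is a minimal, self-contained sketch (not part of the patch) of how the `_bundlespecvariants` table rewrites the content options when a spec such as `none-v2;stream=v2` is parsed; the dictionaries are trimmed copies of the ones defined above:

```python
# Trimmed copies of the tables defined earlier in this file.
contentoptsbyversion = {'v2': {'changegroup': True, 'cg.version': '02',
                               'obsolescence': False, 'phases': False,
                               'tagsfnodescache': True,
                               'revbranchcache': True}}
variants = {'streamv2': {'changegroup': False, 'streamv2': True,
                         'tagsfnodescache': False, 'revbranchcache': False}}

params = {'stream': 'v2'}  # as parsed from the ";stream=v2" suffix
contentopts = contentoptsbyversion['v2'].copy()
if params.get('stream') == 'v2':
    contentopts.update(variants['streamv2'])
# contentopts now asks for a stream part instead of a changegroup:
assert contentopts['streamv2'] and not contentopts['changegroup']
```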
216 225 def readbundle(ui, fh, fname, vfs=None):
217 226 header = changegroup.readexactly(fh, 4)
218 227
219 228 alg = None
220 229 if not fname:
221 230 fname = "stream"
222 231 if not header.startswith('HG') and header.startswith('\0'):
223 232 fh = changegroup.headerlessfixup(fh, header)
224 233 header = "HG10"
225 234 alg = 'UN'
226 235 elif vfs:
227 236 fname = vfs.join(fname)
228 237
229 238 magic, version = header[0:2], header[2:4]
230 239
231 240 if magic != 'HG':
232 241 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
233 242 if version == '10':
234 243 if alg is None:
235 244 alg = changegroup.readexactly(fh, 2)
236 245 return changegroup.cg1unpacker(fh, alg)
237 246 elif version.startswith('2'):
238 247 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
239 248 elif version == 'S1':
240 249 return streamclone.streamcloneapplier(fh)
241 250 else:
242 251 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
243 252
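
For reference, the header dispatch performed by readbundle() above can be summarized in a standalone sketch; `sniffbundle` is a hypothetical helper for illustration, not a Mercurial API:

```python
def sniffbundle(fh):
    """Classify a bundle by its first four bytes (illustrative only)."""
    header = fh.read(4)
    if header.startswith(b'\0'):
        return 'headerless changegroup, treated as HG10 + uncompressed'
    magic, version = header[:2], header[2:4]
    if magic != b'HG':
        raise ValueError('not a Mercurial bundle')
    if version == b'10':
        return 'changegroup v1 (2-byte compression marker follows)'
    if version.startswith(b'2'):
        return 'bundle2'
    if version == b'S1':
        return 'stream clone bundle'
    raise ValueError('unknown bundle version %r' % version)
```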
244 253 def getbundlespec(ui, fh):
245 254 """Infer the bundlespec from a bundle file handle.
246 255
247 256 The input file handle is seeked and the original seek position is not
248 257 restored.
249 258 """
250 259 def speccompression(alg):
251 260 try:
252 261 return util.compengines.forbundletype(alg).bundletype()[0]
253 262 except KeyError:
254 263 return None
255 264
256 265 b = readbundle(ui, fh, None)
257 266 if isinstance(b, changegroup.cg1unpacker):
258 267 alg = b._type
259 268 if alg == '_truncatedBZ':
260 269 alg = 'BZ'
261 270 comp = speccompression(alg)
262 271 if not comp:
263 272 raise error.Abort(_('unknown compression algorithm: %s') % alg)
264 273 return '%s-v1' % comp
265 274 elif isinstance(b, bundle2.unbundle20):
266 275 if 'Compression' in b.params:
267 276 comp = speccompression(b.params['Compression'])
268 277 if not comp:
269 278 raise error.Abort(_('unknown compression algorithm: %s') % comp)
270 279 else:
271 280 comp = 'none'
272 281
273 282 version = None
274 283 for part in b.iterparts():
275 284 if part.type == 'changegroup':
276 285 version = part.params['version']
277 286 if version in ('01', '02'):
278 287 version = 'v2'
279 288 else:
280 289 raise error.Abort(_('changegroup version %s does not have '
281 290 'a known bundlespec') % version,
282 291 hint=_('try upgrading your Mercurial '
283 292 'client'))
293 elif part.type == 'stream2' and version is None:
294 # A stream2 part is required to be part of a v2 bundle
295 version = "v2"
296 requirements = urlreq.unquote(part.params['requirements'])
297 splitted = requirements.split()
298 params = bundle2._formatrequirementsparams(splitted)
299 return 'none-v2;stream=v2;%s' % params
284 300
285 301 if not version:
286 302 raise error.Abort(_('could not identify changegroup version in '
287 303 'bundle'))
288 304
289 305 return '%s-%s' % (comp, version)
290 306 elif isinstance(b, streamclone.streamcloneapplier):
291 307 requirements = streamclone.readbundle1header(fh)[2]
292 308 formatted = bundle2._formatrequirementsparams(requirements)
293 309 return 'none-packed1;%s' % formatted
294 310 else:
295 311 raise error.Abort(_('unknown bundle type: %s') % b)
296 312
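
A hypothetical caller of getbundlespec(), assuming `ui` is a ui instance and `changesets.hg` was produced by `hg bundle`; with the stream2 branch added above, a stream clone bundle now yields a spec instead of aborting:

```python
with open('changesets.hg', 'rb') as fh:
    spec = getbundlespec(ui, fh)
# e.g. 'bzip2-v1', 'gzip-v2', or 'none-v2;stream=v2;requirements=...'
ui.write('%s\n' % spec)
```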
297 313 def _computeoutgoing(repo, heads, common):
298 314 """Computes which revs are outgoing given a set of common
299 315 and a set of heads.
300 316
301 317 This is a separate function so extensions can have access to
302 318 the logic.
303 319
304 320 Returns a discovery.outgoing object.
305 321 """
306 322 cl = repo.changelog
307 323 if common:
308 324 hasnode = cl.hasnode
309 325 common = [n for n in common if hasnode(n)]
310 326 else:
311 327 common = [nullid]
312 328 if not heads:
313 329 heads = cl.heads()
314 330 return discovery.outgoing(repo, common, heads)
315 331
316 332 def _forcebundle1(op):
317 333 """return true if a pull/push must use bundle1
318 334
319 335 This function is used to allow testing of the older bundle version"""
320 336 ui = op.repo.ui
321 337 # The goal of this config is to allow developers to choose the bundle
322 338 # version used during exchange. This is especially handy during tests.
323 339 # Value is a list of bundle versions to pick from; the highest version
324 340 # should be used.
325 341 #
326 342 # developer config: devel.legacy.exchange
327 343 exchange = ui.configlist('devel', 'legacy.exchange')
328 344 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
329 345 return forcebundle1 or not op.remote.capable('bundle2')
330 346
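
The knob read above is driven by configuration; a sketch of the hgrc stanza and the equivalent check (assuming `ui` is a ui instance):

```python
# In an hgrc, forcing bundle1 even against a bundle2-capable peer:
#
#   [devel]
#   legacy.exchange = bundle1
#
exchange = ui.configlist('devel', 'legacy.exchange')  # e.g. ['bundle1']
forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
```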
331 347 class pushoperation(object):
332 348 """A object that represent a single push operation
333 349
334 350 Its purpose is to carry push related state and very common operations.
335 351
336 352 A new pushoperation should be created at the beginning of each push and
337 353 discarded afterward.
338 354 """
339 355
340 356 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
341 357 bookmarks=(), pushvars=None):
342 358 # repo we push from
343 359 self.repo = repo
344 360 self.ui = repo.ui
345 361 # repo we push to
346 362 self.remote = remote
347 363 # force option provided
348 364 self.force = force
349 365 # revs to be pushed (None is "all")
350 366 self.revs = revs
351 367 # bookmarks explicitly pushed
352 368 self.bookmarks = bookmarks
353 369 # allow push of new branch
354 370 self.newbranch = newbranch
355 371 # steps already performed
356 372 # (used to check what steps have been already performed through bundle2)
357 373 self.stepsdone = set()
358 374 # Integer version of the changegroup push result
359 375 # - None means nothing to push
360 376 # - 0 means HTTP error
361 377 # - 1 means we pushed and remote head count is unchanged *or*
362 378 # we have outgoing changesets but refused to push
363 379 # - other values as described by addchangegroup()
364 380 self.cgresult = None
365 381 # Boolean value for the bookmark push
366 382 self.bkresult = None
367 383 # discover.outgoing object (contains common and outgoing data)
368 384 self.outgoing = None
369 385 # all remote topological heads before the push
370 386 self.remoteheads = None
371 387 # Details of the remote branch pre and post push
372 388 #
373 389 # mapping: {'branch': ([remoteheads],
374 390 # [newheads],
375 391 # [unsyncedheads],
376 392 # [discardedheads])}
377 393 # - branch: the branch name
378 394 # - remoteheads: the list of remote heads known locally
379 395 # None if the branch is new
380 396 # - newheads: the new remote heads (known locally) with outgoing pushed
381 397 # - unsyncedheads: the list of remote heads unknown locally.
382 398 # - discardedheads: the list of remote heads made obsolete by the push
383 399 self.pushbranchmap = None
384 400 # testable as a boolean indicating if any nodes are missing locally.
385 401 self.incoming = None
386 402 # summary of the remote phase situation
387 403 self.remotephases = None
388 404 # phases changes that must be pushed along side the changesets
389 405 self.outdatedphases = None
390 406 # phases changes that must be pushed if changeset push fails
391 407 self.fallbackoutdatedphases = None
392 408 # outgoing obsmarkers
393 409 self.outobsmarkers = set()
394 410 # outgoing bookmarks
395 411 self.outbookmarks = []
396 412 # transaction manager
397 413 self.trmanager = None
398 414 # map { pushkey partid -> callback handling failure}
399 415 # used to handle exception from mandatory pushkey part failure
400 416 self.pkfailcb = {}
401 417 # an iterable of pushvars or None
402 418 self.pushvars = pushvars
403 419
404 420 @util.propertycache
405 421 def futureheads(self):
406 422 """future remote heads if the changeset push succeeds"""
407 423 return self.outgoing.missingheads
408 424
409 425 @util.propertycache
410 426 def fallbackheads(self):
411 427 """future remote heads if the changeset push fails"""
412 428 if self.revs is None:
413 429 # no target to push, all common heads are relevant
414 430 return self.outgoing.commonheads
415 431 unfi = self.repo.unfiltered()
416 432 # I want cheads = heads(::missingheads and ::commonheads)
417 433 # (missingheads is revs with secret changeset filtered out)
418 434 #
419 435 # This can be expressed as:
420 436 # cheads = ( (missingheads and ::commonheads)
421 437 # + (commonheads and ::missingheads))"
422 438 # )
423 439 #
424 440 # while trying to push we already computed the following:
425 441 # common = (::commonheads)
426 442 # missing = ((commonheads::missingheads) - commonheads)
427 443 #
428 444 # We can pick:
429 445 # * missingheads part of common (::commonheads)
430 446 common = self.outgoing.common
431 447 nm = self.repo.changelog.nodemap
432 448 cheads = [node for node in self.revs if nm[node] in common]
433 449 # and
434 450 # * commonheads parents on missing
435 451 revset = unfi.set('%ln and parents(roots(%ln))',
436 452 self.outgoing.commonheads,
437 453 self.outgoing.missing)
438 454 cheads.extend(c.node() for c in revset)
439 455 return cheads
440 456
441 457 @property
442 458 def commonheads(self):
443 459 """set of all common heads after changeset bundle push"""
444 460 if self.cgresult:
445 461 return self.futureheads
446 462 else:
447 463 return self.fallbackheads
448 464
449 465 # mapping of message used when pushing bookmark
450 466 bookmsgmap = {'update': (_("updating bookmark %s\n"),
451 467 _('updating bookmark %s failed!\n')),
452 468 'export': (_("exporting bookmark %s\n"),
453 469 _('exporting bookmark %s failed!\n')),
454 470 'delete': (_("deleting remote bookmark %s\n"),
455 471 _('deleting remote bookmark %s failed!\n')),
456 472 }
457 473
458 474
459 475 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
460 476 opargs=None):
461 477 '''Push outgoing changesets (limited by revs) from a local
462 478 repository to remote. Return an integer:
463 479 - None means nothing to push
464 480 - 0 means HTTP error
465 481 - 1 means we pushed and remote head count is unchanged *or*
466 482 we have outgoing changesets but refused to push
467 483 - other values as described by addchangegroup()
468 484 '''
469 485 if opargs is None:
470 486 opargs = {}
471 487 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
472 488 **pycompat.strkwargs(opargs))
473 489 if pushop.remote.local():
474 490 missing = (set(pushop.repo.requirements)
475 491 - pushop.remote.local().supported)
476 492 if missing:
477 493 msg = _("required features are not"
478 494 " supported in the destination:"
479 495 " %s") % (', '.join(sorted(missing)))
480 496 raise error.Abort(msg)
481 497
482 498 if not pushop.remote.canpush():
483 499 raise error.Abort(_("destination does not support push"))
484 500
485 501 if not pushop.remote.capable('unbundle'):
486 502 raise error.Abort(_('cannot push: destination does not support the '
487 503 'unbundle wire protocol command'))
488 504
489 505 # get lock as we might write phase data
490 506 wlock = lock = None
491 507 try:
492 508 # bundle2 push may receive a reply bundle touching bookmarks or other
493 509 # things requiring the wlock. Take it now to ensure proper ordering.
494 510 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
495 511 if (not _forcebundle1(pushop)) and maypushback:
496 512 wlock = pushop.repo.wlock()
497 513 lock = pushop.repo.lock()
498 514 pushop.trmanager = transactionmanager(pushop.repo,
499 515 'push-response',
500 516 pushop.remote.url())
501 517 except IOError as err:
502 518 if err.errno != errno.EACCES:
503 519 raise
504 520 # source repo cannot be locked.
505 521 # We do not abort the push, but just disable the local phase
506 522 # synchronisation.
507 523 msg = 'cannot lock source repository: %s\n' % err
508 524 pushop.ui.debug(msg)
509 525
510 526 with wlock or util.nullcontextmanager(), \
511 527 lock or util.nullcontextmanager(), \
512 528 pushop.trmanager or util.nullcontextmanager():
513 529 pushop.repo.checkpush(pushop)
514 530 _pushdiscovery(pushop)
515 531 if not _forcebundle1(pushop):
516 532 _pushbundle2(pushop)
517 533 _pushchangeset(pushop)
518 534 _pushsyncphase(pushop)
519 535 _pushobsolete(pushop)
520 536 _pushbookmark(pushop)
521 537
522 538 return pushop
523 539
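
A hypothetical driver for push(), roughly what the `hg push` command does once it has a peer; the URL is made up and `repo` is assumed to be a local repository object:

```python
from mercurial import hg

dest = hg.peer(repo, {}, 'ssh://hg@example.com/repo')
pushop = push(repo, dest, newbranch=False)
# pushop.cgresult follows the integer convention documented above:
# None (nothing to push), 0 (HTTP error), 1 (heads unchanged), ...
```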
524 540 # list of steps to perform discovery before push
525 541 pushdiscoveryorder = []
526 542
527 543 # Mapping between step name and function
528 544 #
529 545 # This exists to help extensions wrap steps if necessary
530 546 pushdiscoverymapping = {}
531 547
532 548 def pushdiscovery(stepname):
533 549 """decorator for function performing discovery before push
534 550
535 551 The function is added to the step -> function mapping and appended to the
536 552 list of steps. Beware that decorated functions will be added in order (this
537 553 may matter).
538 554
539 555 You can only use this decorator for a new step; if you want to wrap a step
540 556 from an extension, change the pushdiscoverymapping dictionary directly."""
541 557 def dec(func):
542 558 assert stepname not in pushdiscoverymapping
543 559 pushdiscoverymapping[stepname] = func
544 560 pushdiscoveryorder.append(stepname)
545 561 return func
546 562 return dec
547 563
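
As a sketch, an extension would register an extra discovery step like this (the step name and body are invented for illustration):

```python
@pushdiscovery('example-step')
def _pushdiscoveryexample(pushop):
    # inspect pushop.repo / pushop.remote and stash findings on pushop
    pushop.ui.debug('running example discovery step\n')
```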
548 564 def _pushdiscovery(pushop):
549 565 """Run all discovery steps"""
550 566 for stepname in pushdiscoveryorder:
551 567 step = pushdiscoverymapping[stepname]
552 568 step(pushop)
553 569
554 570 @pushdiscovery('changeset')
555 571 def _pushdiscoverychangeset(pushop):
556 572 """discover the changeset that need to be pushed"""
557 573 fci = discovery.findcommonincoming
558 574 if pushop.revs:
559 575 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
560 576 ancestorsof=pushop.revs)
561 577 else:
562 578 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
563 579 common, inc, remoteheads = commoninc
564 580 fco = discovery.findcommonoutgoing
565 581 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
566 582 commoninc=commoninc, force=pushop.force)
567 583 pushop.outgoing = outgoing
568 584 pushop.remoteheads = remoteheads
569 585 pushop.incoming = inc
570 586
571 587 @pushdiscovery('phase')
572 588 def _pushdiscoveryphase(pushop):
573 589 """discover the phase that needs to be pushed
574 590
575 591 (computed for both success and failure case for changesets push)"""
576 592 outgoing = pushop.outgoing
577 593 unfi = pushop.repo.unfiltered()
578 594 remotephases = pushop.remote.listkeys('phases')
579 595 if (pushop.ui.configbool('ui', '_usedassubrepo')
580 596 and remotephases # server supports phases
581 597 and not pushop.outgoing.missing # no changesets to be pushed
582 598 and remotephases.get('publishing', False)):
583 599 # When:
584 600 # - this is a subrepo push
585 601 # - and remote supports phases
586 602 # - and no changesets are to be pushed
587 603 # - and remote is publishing
588 604 # We may be in issue 3871 case!
589 605 # We drop the phase synchronisation that is normally done as a
590 606 # courtesy, to avoid publishing changesets that may still be draft
591 607 # locally on the remote.
592 608 pushop.outdatedphases = []
593 609 pushop.fallbackoutdatedphases = []
594 610 return
595 611
596 612 pushop.remotephases = phases.remotephasessummary(pushop.repo,
597 613 pushop.fallbackheads,
598 614 remotephases)
599 615 droots = pushop.remotephases.draftroots
600 616
601 617 extracond = ''
602 618 if not pushop.remotephases.publishing:
603 619 extracond = ' and public()'
604 620 revset = 'heads((%%ln::%%ln) %s)' % extracond
605 621 # Get the list of all revs draft on remote but public here.
606 622 # XXX Beware that the revset breaks if droots is not strictly
607 623 # XXX roots; we may want to ensure it is, but that is costly
608 624 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
609 625 if not outgoing.missing:
610 626 future = fallback
611 627 else:
612 628 # add the changesets we are going to push as draft
613 629 #
614 630 # should not be necessary for a publishing server, but because of an
615 631 # issue fixed in xxxxx we have to do it anyway.
616 632 fdroots = list(unfi.set('roots(%ln + %ln::)',
617 633 outgoing.missing, droots))
618 634 fdroots = [f.node() for f in fdroots]
619 635 future = list(unfi.set(revset, fdroots, pushop.futureheads))
620 636 pushop.outdatedphases = future
621 637 pushop.fallbackoutdatedphases = fallback
622 638
623 639 @pushdiscovery('obsmarker')
624 640 def _pushdiscoveryobsmarkers(pushop):
625 641 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
626 642 and pushop.repo.obsstore
627 643 and 'obsolete' in pushop.remote.listkeys('namespaces')):
628 644 repo = pushop.repo
629 645 # very naive computation that can be quite expensive on a big repo;
630 646 # however, evolution is currently slow on those anyway.
631 647 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
632 648 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
633 649
634 650 @pushdiscovery('bookmarks')
635 651 def _pushdiscoverybookmarks(pushop):
636 652 ui = pushop.ui
637 653 repo = pushop.repo.unfiltered()
638 654 remote = pushop.remote
639 655 ui.debug("checking for updated bookmarks\n")
640 656 ancestors = ()
641 657 if pushop.revs:
642 658 revnums = map(repo.changelog.rev, pushop.revs)
643 659 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
644 660 remotebookmark = remote.listkeys('bookmarks')
645 661
646 662 explicit = set([repo._bookmarks.expandname(bookmark)
647 663 for bookmark in pushop.bookmarks])
648 664
649 665 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
650 666 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
651 667
652 668 def safehex(x):
653 669 if x is None:
654 670 return x
655 671 return hex(x)
656 672
657 673 def hexifycompbookmarks(bookmarks):
658 674 return [(b, safehex(scid), safehex(dcid))
659 675 for (b, scid, dcid) in bookmarks]
660 676
661 677 comp = [hexifycompbookmarks(marks) for marks in comp]
662 678 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
663 679
664 680 def _processcompared(pushop, pushed, explicit, remotebms, comp):
665 681 """take decision on bookmark to pull from the remote bookmark
666 682
667 683 Exist to help extensions who want to alter this behavior.
668 684 """
669 685 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
670 686
671 687 repo = pushop.repo
672 688
673 689 for b, scid, dcid in advsrc:
674 690 if b in explicit:
675 691 explicit.remove(b)
676 692 if not pushed or repo[scid].rev() in pushed:
677 693 pushop.outbookmarks.append((b, dcid, scid))
678 694 # search for added bookmarks
679 695 for b, scid, dcid in addsrc:
680 696 if b in explicit:
681 697 explicit.remove(b)
682 698 pushop.outbookmarks.append((b, '', scid))
683 699 # search for overwritten bookmarks
684 700 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
685 701 if b in explicit:
686 702 explicit.remove(b)
687 703 pushop.outbookmarks.append((b, dcid, scid))
688 704 # search for bookmarks to delete
689 705 for b, scid, dcid in adddst:
690 706 if b in explicit:
691 707 explicit.remove(b)
692 708 # treat as "deleted locally"
693 709 pushop.outbookmarks.append((b, dcid, ''))
694 710 # identical bookmarks shouldn't get reported
695 711 for b, scid, dcid in same:
696 712 if b in explicit:
697 713 explicit.remove(b)
698 714
699 715 if explicit:
700 716 explicit = sorted(explicit)
701 717 # we should probably list all of them
702 718 pushop.ui.warn(_('bookmark %s does not exist on the local '
703 719 'or remote repository!\n') % explicit[0])
704 720 pushop.bkresult = 2
705 721
706 722 pushop.outbookmarks.sort()
707 723
708 724 def _pushcheckoutgoing(pushop):
709 725 outgoing = pushop.outgoing
710 726 unfi = pushop.repo.unfiltered()
711 727 if not outgoing.missing:
712 728 # nothing to push
713 729 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
714 730 return False
715 731 # something to push
716 732 if not pushop.force:
717 733 # if repo.obsstore is falsy --> no obsolete markers,
718 734 # so we can skip the iteration
719 735 if unfi.obsstore:
720 736 # these messages are defined here because of the 80-char line limit
721 737 mso = _("push includes obsolete changeset: %s!")
722 738 mspd = _("push includes phase-divergent changeset: %s!")
723 739 mscd = _("push includes content-divergent changeset: %s!")
724 740 mst = {"orphan": _("push includes orphan changeset: %s!"),
725 741 "phase-divergent": mspd,
726 742 "content-divergent": mscd}
727 743 # If there is at least one obsolete or unstable
728 744 # changeset in missing, at least one of the missing
729 745 # heads will be obsolete or unstable. So checking
730 746 # heads only is ok.
731 747 for node in outgoing.missingheads:
732 748 ctx = unfi[node]
733 749 if ctx.obsolete():
734 750 raise error.Abort(mso % ctx)
735 751 elif ctx.isunstable():
736 752 # TODO print more than one instability in the abort
737 753 # message
738 754 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
739 755
740 756 discovery.checkheads(pushop)
741 757 return True
742 758
743 759 # List of names of steps to perform for an outgoing bundle2, order matters.
744 760 b2partsgenorder = []
745 761
746 762 # Mapping between step name and function
747 763 #
748 764 # This exists to help extensions wrap steps if necessary
749 765 b2partsgenmapping = {}
750 766
751 767 def b2partsgenerator(stepname, idx=None):
752 768 """decorator for function generating bundle2 part
753 769
754 770 The function is added to the step -> function mapping and appended to the
755 771 list of steps. Beware that decorated functions will be added in order
756 772 (this may matter).
757 773
758 774 You can only use this decorator for new steps; if you want to wrap a step
759 775 from an extension, change the b2partsgenmapping dictionary directly."""
760 776 def dec(func):
761 777 assert stepname not in b2partsgenmapping
762 778 b2partsgenmapping[stepname] = func
763 779 if idx is None:
764 780 b2partsgenorder.append(stepname)
765 781 else:
766 782 b2partsgenorder.insert(idx, stepname)
767 783 return func
768 784 return dec
769 785
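
Correspondingly, a sketch of an extension registering its own part generator; returning a callable makes it a reply handler, as `_pushbundle2` below expects (the part name is hypothetical and would need matching server support):

```python
@b2partsgenerator('example-part')
def _pushb2example(pushop, bundler):
    if 'example-part' in pushop.stepsdone:
        return
    pushop.stepsdone.add('example-part')
    part = bundler.newpart('example-part', data=b'payload')
    def handlereply(op):
        pushop.ui.debug('server replied to part %s\n' % part.id)
    return handlereply
```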
770 786 def _pushb2ctxcheckheads(pushop, bundler):
771 787 """Generate race condition checking parts
772 788
773 789 Exists as an independent function to aid extensions
774 790 """
775 791 # * 'force' does not check for push races,
776 792 # * if we don't push anything, there is nothing to check.
777 793 if not pushop.force and pushop.outgoing.missingheads:
778 794 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
779 795 emptyremote = pushop.pushbranchmap is None
780 796 if not allowunrelated or emptyremote:
781 797 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
782 798 else:
783 799 affected = set()
784 800 for branch, heads in pushop.pushbranchmap.iteritems():
785 801 remoteheads, newheads, unsyncedheads, discardedheads = heads
786 802 if remoteheads is not None:
787 803 remote = set(remoteheads)
788 804 affected |= set(discardedheads) & remote
789 805 affected |= remote - set(newheads)
790 806 if affected:
791 807 data = iter(sorted(affected))
792 808 bundler.newpart('check:updated-heads', data=data)
793 809
794 810 def _pushing(pushop):
795 811 """return True if we are pushing anything"""
796 812 return bool(pushop.outgoing.missing
797 813 or pushop.outdatedphases
798 814 or pushop.outobsmarkers
799 815 or pushop.outbookmarks)
800 816
801 817 @b2partsgenerator('check-bookmarks')
802 818 def _pushb2checkbookmarks(pushop, bundler):
803 819 """insert bookmark move checking"""
804 820 if not _pushing(pushop) or pushop.force:
805 821 return
806 822 b2caps = bundle2.bundle2caps(pushop.remote)
807 823 hasbookmarkcheck = 'bookmarks' in b2caps
808 824 if not (pushop.outbookmarks and hasbookmarkcheck):
809 825 return
810 826 data = []
811 827 for book, old, new in pushop.outbookmarks:
812 828 old = bin(old)
813 829 data.append((book, old))
814 830 checkdata = bookmod.binaryencode(data)
815 831 bundler.newpart('check:bookmarks', data=checkdata)
816 832
817 833 @b2partsgenerator('check-phases')
818 834 def _pushb2checkphases(pushop, bundler):
819 835 """insert phase move checking"""
820 836 if not _pushing(pushop) or pushop.force:
821 837 return
822 838 b2caps = bundle2.bundle2caps(pushop.remote)
823 839 hasphaseheads = 'heads' in b2caps.get('phases', ())
824 840 if pushop.remotephases is not None and hasphaseheads:
825 841 # check that the remote phase has not changed
826 842 checks = [[] for p in phases.allphases]
827 843 checks[phases.public].extend(pushop.remotephases.publicheads)
828 844 checks[phases.draft].extend(pushop.remotephases.draftroots)
829 845 if any(checks):
830 846 for nodes in checks:
831 847 nodes.sort()
832 848 checkdata = phases.binaryencode(checks)
833 849 bundler.newpart('check:phases', data=checkdata)
834 850
835 851 @b2partsgenerator('changeset')
836 852 def _pushb2ctx(pushop, bundler):
837 853 """handle changegroup push through bundle2
838 854
839 855 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
840 856 """
841 857 if 'changesets' in pushop.stepsdone:
842 858 return
843 859 pushop.stepsdone.add('changesets')
844 860 # Send known heads to the server for race detection.
845 861 if not _pushcheckoutgoing(pushop):
846 862 return
847 863 pushop.repo.prepushoutgoinghooks(pushop)
848 864
849 865 _pushb2ctxcheckheads(pushop, bundler)
850 866
851 867 b2caps = bundle2.bundle2caps(pushop.remote)
852 868 version = '01'
853 869 cgversions = b2caps.get('changegroup')
854 870 if cgversions: # 3.1 and 3.2 ship with an empty value
855 871 cgversions = [v for v in cgversions
856 872 if v in changegroup.supportedoutgoingversions(
857 873 pushop.repo)]
858 874 if not cgversions:
859 875 raise ValueError(_('no common changegroup version'))
860 876 version = max(cgversions)
861 877 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
862 878 'push')
863 879 cgpart = bundler.newpart('changegroup', data=cgstream)
864 880 if cgversions:
865 881 cgpart.addparam('version', version)
866 882 if 'treemanifest' in pushop.repo.requirements:
867 883 cgpart.addparam('treemanifest', '1')
868 884 def handlereply(op):
869 885 """extract addchangegroup returns from server reply"""
870 886 cgreplies = op.records.getreplies(cgpart.id)
871 887 assert len(cgreplies['changegroup']) == 1
872 888 pushop.cgresult = cgreplies['changegroup'][0]['return']
873 889 return handlereply
874 890
875 891 @b2partsgenerator('phase')
876 892 def _pushb2phases(pushop, bundler):
877 893 """handle phase push through bundle2"""
878 894 if 'phases' in pushop.stepsdone:
879 895 return
880 896 b2caps = bundle2.bundle2caps(pushop.remote)
881 897 ui = pushop.repo.ui
882 898
883 899 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
884 900 haspushkey = 'pushkey' in b2caps
885 901 hasphaseheads = 'heads' in b2caps.get('phases', ())
886 902
887 903 if hasphaseheads and not legacyphase:
888 904 return _pushb2phaseheads(pushop, bundler)
889 905 elif haspushkey:
890 906 return _pushb2phasespushkey(pushop, bundler)
891 907
892 908 def _pushb2phaseheads(pushop, bundler):
893 909 """push phase information through a bundle2 - binary part"""
894 910 pushop.stepsdone.add('phases')
895 911 if pushop.outdatedphases:
896 912 updates = [[] for p in phases.allphases]
897 913 updates[0].extend(h.node() for h in pushop.outdatedphases)
898 914 phasedata = phases.binaryencode(updates)
899 915 bundler.newpart('phase-heads', data=phasedata)
900 916
901 917 def _pushb2phasespushkey(pushop, bundler):
902 918 """push phase information through a bundle2 - pushkey part"""
903 919 pushop.stepsdone.add('phases')
904 920 part2node = []
905 921
906 922 def handlefailure(pushop, exc):
907 923 targetid = int(exc.partid)
908 924 for partid, node in part2node:
909 925 if partid == targetid:
910 926 raise error.Abort(_('updating %s to public failed') % node)
911 927
912 928 enc = pushkey.encode
913 929 for newremotehead in pushop.outdatedphases:
914 930 part = bundler.newpart('pushkey')
915 931 part.addparam('namespace', enc('phases'))
916 932 part.addparam('key', enc(newremotehead.hex()))
917 933 part.addparam('old', enc('%d' % phases.draft))
918 934 part.addparam('new', enc('%d' % phases.public))
919 935 part2node.append((part.id, newremotehead))
920 936 pushop.pkfailcb[part.id] = handlefailure
921 937
922 938 def handlereply(op):
923 939 for partid, node in part2node:
924 940 partrep = op.records.getreplies(partid)
925 941 results = partrep['pushkey']
926 942 assert len(results) <= 1
927 943 msg = None
928 944 if not results:
929 945 msg = _('server ignored update of %s to public!\n') % node
930 946 elif not int(results[0]['return']):
931 947 msg = _('updating %s to public failed!\n') % node
932 948 if msg is not None:
933 949 pushop.ui.warn(msg)
934 950 return handlereply
935 951
936 952 @b2partsgenerator('obsmarkers')
937 953 def _pushb2obsmarkers(pushop, bundler):
938 954 if 'obsmarkers' in pushop.stepsdone:
939 955 return
940 956 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
941 957 if obsolete.commonversion(remoteversions) is None:
942 958 return
943 959 pushop.stepsdone.add('obsmarkers')
944 960 if pushop.outobsmarkers:
945 961 markers = sorted(pushop.outobsmarkers)
946 962 bundle2.buildobsmarkerspart(bundler, markers)
947 963
948 964 @b2partsgenerator('bookmarks')
949 965 def _pushb2bookmarks(pushop, bundler):
950 966 """handle bookmark push through bundle2"""
951 967 if 'bookmarks' in pushop.stepsdone:
952 968 return
953 969 b2caps = bundle2.bundle2caps(pushop.remote)
954 970
955 971 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
956 972 legacybooks = 'bookmarks' in legacy
957 973
958 974 if not legacybooks and 'bookmarks' in b2caps:
959 975 return _pushb2bookmarkspart(pushop, bundler)
960 976 elif 'pushkey' in b2caps:
961 977 return _pushb2bookmarkspushkey(pushop, bundler)
962 978
963 979 def _bmaction(old, new):
964 980 """small utility for bookmark pushing"""
965 981 if not old:
966 982 return 'export'
967 983 elif not new:
968 984 return 'delete'
969 985 return 'update'
970 986
971 987 def _pushb2bookmarkspart(pushop, bundler):
972 988 pushop.stepsdone.add('bookmarks')
973 989 if not pushop.outbookmarks:
974 990 return
975 991
976 992 allactions = []
977 993 data = []
978 994 for book, old, new in pushop.outbookmarks:
979 995 new = bin(new)
980 996 data.append((book, new))
981 997 allactions.append((book, _bmaction(old, new)))
982 998 checkdata = bookmod.binaryencode(data)
983 999 bundler.newpart('bookmarks', data=checkdata)
984 1000
985 1001 def handlereply(op):
986 1002 ui = pushop.ui
987 1003 # if success
988 1004 for book, action in allactions:
989 1005 ui.status(bookmsgmap[action][0] % book)
990 1006
991 1007 return handlereply
992 1008
993 1009 def _pushb2bookmarkspushkey(pushop, bundler):
994 1010 pushop.stepsdone.add('bookmarks')
995 1011 part2book = []
996 1012 enc = pushkey.encode
997 1013
998 1014 def handlefailure(pushop, exc):
999 1015 targetid = int(exc.partid)
1000 1016 for partid, book, action in part2book:
1001 1017 if partid == targetid:
1002 1018 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1003 1019 # we should not be called for a part we did not generate
1004 1020 assert False
1005 1021
1006 1022 for book, old, new in pushop.outbookmarks:
1007 1023 part = bundler.newpart('pushkey')
1008 1024 part.addparam('namespace', enc('bookmarks'))
1009 1025 part.addparam('key', enc(book))
1010 1026 part.addparam('old', enc(old))
1011 1027 part.addparam('new', enc(new))
1012 1028 action = 'update'
1013 1029 if not old:
1014 1030 action = 'export'
1015 1031 elif not new:
1016 1032 action = 'delete'
1017 1033 part2book.append((part.id, book, action))
1018 1034 pushop.pkfailcb[part.id] = handlefailure
1019 1035
1020 1036 def handlereply(op):
1021 1037 ui = pushop.ui
1022 1038 for partid, book, action in part2book:
1023 1039 partrep = op.records.getreplies(partid)
1024 1040 results = partrep['pushkey']
1025 1041 assert len(results) <= 1
1026 1042 if not results:
1027 1043 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
1028 1044 else:
1029 1045 ret = int(results[0]['return'])
1030 1046 if ret:
1031 1047 ui.status(bookmsgmap[action][0] % book)
1032 1048 else:
1033 1049 ui.warn(bookmsgmap[action][1] % book)
1034 1050 if pushop.bkresult is not None:
1035 1051 pushop.bkresult = 1
1036 1052 return handlereply
1037 1053
1038 1054 @b2partsgenerator('pushvars', idx=0)
1039 1055 def _getbundlesendvars(pushop, bundler):
1040 1056 '''send shellvars via bundle2'''
1041 1057 pushvars = pushop.pushvars
1042 1058 if pushvars:
1043 1059 shellvars = {}
1044 1060 for raw in pushvars:
1045 1061 if '=' not in raw:
1046 1062 msg = ("unable to parse variable '%s', should follow "
1047 1063 "'KEY=VALUE' or 'KEY=' format")
1048 1064 raise error.Abort(msg % raw)
1049 1065 k, v = raw.split('=', 1)
1050 1066 shellvars[k] = v
1051 1067
1052 1068 part = bundler.newpart('pushvars')
1053 1069
1054 1070 for key, value in shellvars.iteritems():
1055 1071 part.addparam(key, value, mandatory=False)
1056 1072
1057 1073 def _pushbundle2(pushop):
1058 1074 """push data to the remote using bundle2
1059 1075
1060 1076 The only currently supported type of data is changegroup but this will
1061 1077 evolve in the future."""
1062 1078 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1063 1079 pushback = (pushop.trmanager
1064 1080 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1065 1081
1066 1082 # create reply capability
1067 1083 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1068 1084 allowpushback=pushback,
1069 1085 role='client'))
1070 1086 bundler.newpart('replycaps', data=capsblob)
1071 1087 replyhandlers = []
1072 1088 for partgenname in b2partsgenorder:
1073 1089 partgen = b2partsgenmapping[partgenname]
1074 1090 ret = partgen(pushop, bundler)
1075 1091 if callable(ret):
1076 1092 replyhandlers.append(ret)
1077 1093 # do not push if nothing to push
1078 1094 if bundler.nbparts <= 1:
1079 1095 return
1080 1096 stream = util.chunkbuffer(bundler.getchunks())
1081 1097 try:
1082 1098 try:
1083 1099 reply = pushop.remote.unbundle(
1084 1100 stream, ['force'], pushop.remote.url())
1085 1101 except error.BundleValueError as exc:
1086 1102 raise error.Abort(_('missing support for %s') % exc)
1087 1103 try:
1088 1104 trgetter = None
1089 1105 if pushback:
1090 1106 trgetter = pushop.trmanager.transaction
1091 1107 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1092 1108 except error.BundleValueError as exc:
1093 1109 raise error.Abort(_('missing support for %s') % exc)
1094 1110 except bundle2.AbortFromPart as exc:
1095 1111 pushop.ui.status(_('remote: %s\n') % exc)
1096 1112 if exc.hint is not None:
1097 1113 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1098 1114 raise error.Abort(_('push failed on remote'))
1099 1115 except error.PushkeyFailed as exc:
1100 1116 partid = int(exc.partid)
1101 1117 if partid not in pushop.pkfailcb:
1102 1118 raise
1103 1119 pushop.pkfailcb[partid](pushop, exc)
1104 1120 for rephand in replyhandlers:
1105 1121 rephand(op)
1106 1122
1107 1123 def _pushchangeset(pushop):
1108 1124 """Make the actual push of changeset bundle to remote repo"""
1109 1125 if 'changesets' in pushop.stepsdone:
1110 1126 return
1111 1127 pushop.stepsdone.add('changesets')
1112 1128 if not _pushcheckoutgoing(pushop):
1113 1129 return
1114 1130
1115 1131 # Should have verified this in push().
1116 1132 assert pushop.remote.capable('unbundle')
1117 1133
1118 1134 pushop.repo.prepushoutgoinghooks(pushop)
1119 1135 outgoing = pushop.outgoing
1120 1136 # TODO: get bundlecaps from remote
1121 1137 bundlecaps = None
1122 1138 # create a changegroup from local
1123 1139 if pushop.revs is None and not (outgoing.excluded
1124 1140 or pushop.repo.changelog.filteredrevs):
1125 1141 # push everything,
1126 1142 # use the fast path, no race possible on push
1127 1143 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1128 1144 fastpath=True, bundlecaps=bundlecaps)
1129 1145 else:
1130 1146 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1131 1147 'push', bundlecaps=bundlecaps)
1132 1148
1133 1149 # apply changegroup to remote
1134 1150 # local repo finds heads on server, finds out what
1135 1151 # revs it must push. once revs transferred, if server
1136 1152 # finds it has different heads (someone else won
1137 1153 # commit/push race), server aborts.
1138 1154 if pushop.force:
1139 1155 remoteheads = ['force']
1140 1156 else:
1141 1157 remoteheads = pushop.remoteheads
1142 1158 # ssh: return remote's addchangegroup()
1143 1159 # http: return remote's addchangegroup() or 0 for error
1144 1160 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1145 1161 pushop.repo.url())
1146 1162
1147 1163 def _pushsyncphase(pushop):
1148 1164 """synchronise phase information locally and remotely"""
1149 1165 cheads = pushop.commonheads
1150 1166 # even when we don't push, exchanging phase data is useful
1151 1167 remotephases = pushop.remote.listkeys('phases')
1152 1168 if (pushop.ui.configbool('ui', '_usedassubrepo')
1153 1169 and remotephases # server supports phases
1154 1170 and pushop.cgresult is None # nothing was pushed
1155 1171 and remotephases.get('publishing', False)):
1156 1172 # When:
1157 1173 # - this is a subrepo push
1158 1174 # - and remote supports phases
1159 1175 # - and no changeset was pushed
1160 1176 # - and remote is publishing
1161 1177 # We may be in issue 3871 case!
1162 1178 # We drop the phase synchronisation that is normally done as a
1163 1179 # courtesy, to avoid publishing changesets that may still be draft
1164 1180 # locally on the remote.
1165 1181 remotephases = {'publishing': 'True'}
1166 1182 if not remotephases: # old server or public-only reply from non-publishing
1167 1183 _localphasemove(pushop, cheads)
1168 1184 # don't push any phase data as there is nothing to push
1169 1185 else:
1170 1186 ana = phases.analyzeremotephases(pushop.repo, cheads,
1171 1187 remotephases)
1172 1188 pheads, droots = ana
1173 1189 ### Apply remote phase on local
1174 1190 if remotephases.get('publishing', False):
1175 1191 _localphasemove(pushop, cheads)
1176 1192 else: # publish = False
1177 1193 _localphasemove(pushop, pheads)
1178 1194 _localphasemove(pushop, cheads, phases.draft)
1179 1195 ### Apply local phase on remote
1180 1196
1181 1197 if pushop.cgresult:
1182 1198 if 'phases' in pushop.stepsdone:
1183 1199 # phases already pushed though bundle2
1184 1200 return
1185 1201 outdated = pushop.outdatedphases
1186 1202 else:
1187 1203 outdated = pushop.fallbackoutdatedphases
1188 1204
1189 1205 pushop.stepsdone.add('phases')
1190 1206
1191 1207 # filter heads already turned public by the push
1192 1208 outdated = [c for c in outdated if c.node() not in pheads]
1193 1209 # fallback to independent pushkey command
1194 1210 for newremotehead in outdated:
1195 1211 r = pushop.remote.pushkey('phases',
1196 1212 newremotehead.hex(),
1197 1213 ('%d' % phases.draft),
1198 1214 ('%d' % phases.public))
1199 1215 if not r:
1200 1216 pushop.ui.warn(_('updating %s to public failed!\n')
1201 1217 % newremotehead)
1202 1218
1203 1219 def _localphasemove(pushop, nodes, phase=phases.public):
1204 1220 """move <nodes> to <phase> in the local source repo"""
1205 1221 if pushop.trmanager:
1206 1222 phases.advanceboundary(pushop.repo,
1207 1223 pushop.trmanager.transaction(),
1208 1224 phase,
1209 1225 nodes)
1210 1226 else:
1211 1227 # repo is not locked, do not change any phases!
1212 1228 # Inform the user that phases should have been moved when
1213 1229 # applicable.
1214 1230 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1215 1231 phasestr = phases.phasenames[phase]
1216 1232 if actualmoves:
1217 1233 pushop.ui.status(_('cannot lock source repo, skipping '
1218 1234 'local %s phase update\n') % phasestr)
1219 1235
1220 1236 def _pushobsolete(pushop):
1221 1237 """utility function to push obsolete markers to a remote"""
1222 1238 if 'obsmarkers' in pushop.stepsdone:
1223 1239 return
1224 1240 repo = pushop.repo
1225 1241 remote = pushop.remote
1226 1242 pushop.stepsdone.add('obsmarkers')
1227 1243 if pushop.outobsmarkers:
1228 1244 pushop.ui.debug('try to push obsolete markers to remote\n')
1229 1245 rslts = []
1230 1246 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1231 1247 for key in sorted(remotedata, reverse=True):
1232 1248 # reverse sort to ensure we end with dump0
1233 1249 data = remotedata[key]
1234 1250 rslts.append(remote.pushkey('obsolete', key, '', data))
1235 1251 if [r for r in rslts if not r]:
1236 1252 msg = _('failed to push some obsolete markers!\n')
1237 1253 repo.ui.warn(msg)
1238 1254
1239 1255 def _pushbookmark(pushop):
1240 1256 """Update bookmark position on remote"""
1241 1257 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1242 1258 return
1243 1259 pushop.stepsdone.add('bookmarks')
1244 1260 ui = pushop.ui
1245 1261 remote = pushop.remote
1246 1262
1247 1263 for b, old, new in pushop.outbookmarks:
1248 1264 action = 'update'
1249 1265 if not old:
1250 1266 action = 'export'
1251 1267 elif not new:
1252 1268 action = 'delete'
1253 1269 if remote.pushkey('bookmarks', b, old, new):
1254 1270 ui.status(bookmsgmap[action][0] % b)
1255 1271 else:
1256 1272 ui.warn(bookmsgmap[action][1] % b)
1257 1273 # discovery can have set the value from an invalid entry
1258 1274 if pushop.bkresult is not None:
1259 1275 pushop.bkresult = 1
1260 1276
1261 1277 class pulloperation(object):
1262 1278 """A object that represent a single pull operation
1263 1279
1264 1280 It purpose is to carry pull related state and very common operation.
1265 1281
1266 1282 A new should be created at the beginning of each pull and discarded
1267 1283 afterward.
1268 1284 """
1269 1285
1270 1286 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1271 1287 remotebookmarks=None, streamclonerequested=None):
1272 1288 # repo we pull into
1273 1289 self.repo = repo
1274 1290 # repo we pull from
1275 1291 self.remote = remote
1276 1292 # revision we try to pull (None is "all")
1277 1293 self.heads = heads
1278 1294 # bookmark pulled explicitly
1279 1295 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1280 1296 for bookmark in bookmarks]
1281 1297 # do we force pull?
1282 1298 self.force = force
1283 1299 # whether a streaming clone was requested
1284 1300 self.streamclonerequested = streamclonerequested
1285 1301 # transaction manager
1286 1302 self.trmanager = None
1287 1303 # set of common changeset between local and remote before pull
1288 1304 self.common = None
1289 1305 # set of pulled heads
1290 1306 self.rheads = None
1291 1307 # list of missing changesets to fetch remotely
1292 1308 self.fetch = None
1293 1309 # remote bookmarks data
1294 1310 self.remotebookmarks = remotebookmarks
1295 1311 # result of changegroup pulling (used as return code by pull)
1296 1312 self.cgresult = None
1297 1313 # list of steps already done
1298 1314 self.stepsdone = set()
1299 1315 # Whether we attempted a clone from pre-generated bundles.
1300 1316 self.clonebundleattempted = False
1301 1317
1302 1318 @util.propertycache
1303 1319 def pulledsubset(self):
1304 1320 """heads of the set of changeset target by the pull"""
1305 1321 # compute target subset
1306 1322 if self.heads is None:
1307 1323 # We pulled everything possible
1308 1324 # sync on everything common
1309 1325 c = set(self.common)
1310 1326 ret = list(self.common)
1311 1327 for n in self.rheads:
1312 1328 if n not in c:
1313 1329 ret.append(n)
1314 1330 return ret
1315 1331 else:
1316 1332 # We pulled a specific subset
1317 1333 # sync on this subset
1318 1334 return self.heads
1319 1335
1320 1336 @util.propertycache
1321 1337 def canusebundle2(self):
1322 1338 return not _forcebundle1(self)
1323 1339
1324 1340 @util.propertycache
1325 1341 def remotebundle2caps(self):
1326 1342 return bundle2.bundle2caps(self.remote)
1327 1343
1328 1344 def gettransaction(self):
1329 1345 # deprecated; talk to trmanager directly
1330 1346 return self.trmanager.transaction()
1331 1347
1332 1348 class transactionmanager(util.transactional):
1333 1349 """An object to manage the life cycle of a transaction
1334 1350
1335 1351 It creates the transaction on demand and calls the appropriate hooks when
1336 1352 closing the transaction."""
1337 1353 def __init__(self, repo, source, url):
1338 1354 self.repo = repo
1339 1355 self.source = source
1340 1356 self.url = url
1341 1357 self._tr = None
1342 1358
1343 1359 def transaction(self):
1344 1360 """Return an open transaction object, constructing if necessary"""
1345 1361 if not self._tr:
1346 1362 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1347 1363 self._tr = self.repo.transaction(trname)
1348 1364 self._tr.hookargs['source'] = self.source
1349 1365 self._tr.hookargs['url'] = self.url
1350 1366 return self._tr
1351 1367
1352 1368 def close(self):
1353 1369 """close transaction if created"""
1354 1370 if self._tr is not None:
1355 1371 self._tr.close()
1356 1372
1357 1373 def release(self):
1358 1374 """release transaction if created"""
1359 1375 if self._tr is not None:
1360 1376 self._tr.release()
1361 1377
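
Since transactionmanager derives from util.transactional, it can be used as a context manager; a minimal usage sketch (`repo` assumed, URL illustrative):

```python
trmanager = transactionmanager(repo, 'pull', 'https://example.com/repo')
with trmanager:
    tr = trmanager.transaction()  # lazily opened on first use
    # ... apply incoming data under tr ...
# on normal exit the transaction is closed; on error it is released
```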
1362 1378 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1363 1379 streamclonerequested=None):
1364 1380 """Fetch repository data from a remote.
1365 1381
1366 1382 This is the main function used to retrieve data from a remote repository.
1367 1383
1368 1384 ``repo`` is the local repository to clone into.
1369 1385 ``remote`` is a peer instance.
1370 1386 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1371 1387 default) means to pull everything from the remote.
1372 1388 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1373 1389 default, all remote bookmarks are pulled.
1374 1390 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1375 1391 initialization.
1376 1392 ``streamclonerequested`` is a boolean indicating whether a "streaming
1377 1393 clone" is requested. A "streaming clone" is essentially a raw file copy
1378 1394 of revlogs from the server. This only works when the local repository is
1379 1395 empty. The default value of ``None`` means to respect the server
1380 1396 configuration for preferring stream clones.
1381 1397
1382 1398 Returns the ``pulloperation`` created for this pull.
1383 1399 """
1384 1400 if opargs is None:
1385 1401 opargs = {}
1386 1402 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1387 1403 streamclonerequested=streamclonerequested,
1388 1404 **pycompat.strkwargs(opargs))
1389 1405
1390 1406 peerlocal = pullop.remote.local()
1391 1407 if peerlocal:
1392 1408 missing = set(peerlocal.requirements) - pullop.repo.supported
1393 1409 if missing:
1394 1410 msg = _("required features are not"
1395 1411 " supported in the destination:"
1396 1412 " %s") % (', '.join(sorted(missing)))
1397 1413 raise error.Abort(msg)
1398 1414
1399 1415 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1400 1416 with repo.wlock(), repo.lock(), pullop.trmanager:
1401 1417 # This should ideally be in _pullbundle2(). However, it needs to run
1402 1418 # before discovery to avoid extra work.
1403 1419 _maybeapplyclonebundle(pullop)
1404 1420 streamclone.maybeperformlegacystreamclone(pullop)
1405 1421 _pulldiscovery(pullop)
1406 1422 if pullop.canusebundle2:
1407 1423 _pullbundle2(pullop)
1408 1424 _pullchangeset(pullop)
1409 1425 _pullphase(pullop)
1410 1426 _pullbookmarks(pullop)
1411 1427 _pullobsolete(pullop)
1412 1428
1413 1429 # storing remotenames
1414 1430 if repo.ui.configbool('experimental', 'remotenames'):
1415 1431 logexchange.pullremotenames(repo, remote)
1416 1432
1417 1433 return pullop
1418 1434
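
And a hypothetical caller of pull(), mirroring what `hg pull` does; `repo` is a local repository and the URL is illustrative:

```python
from mercurial import hg

other = hg.peer(repo, {}, 'https://example.com/repo')
pullop = pull(repo, other, heads=None, force=False)
if pullop.cgresult:
    repo.ui.status('changesets pulled\n')
```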
1419 1435 # list of steps to perform discovery before pull
1420 1436 pulldiscoveryorder = []
1421 1437
1422 1438 # Mapping between step name and function
1423 1439 #
1424 1440 # This exists to help extensions wrap steps if necessary
1425 1441 pulldiscoverymapping = {}
1426 1442
1427 1443 def pulldiscovery(stepname):
1428 1444 """decorator for function performing discovery before pull
1429 1445
1430 1446 The function is added to the step -> function mapping and appended to the
1431 1447 list of steps. Beware that decorated functions will be added in order (this
1432 1448 may matter).
1433 1449
1434 1450 You can only use this decorator for a new step; if you want to wrap a step
1435 1451 from an extension, change the pulldiscoverymapping dictionary directly."""
1436 1452 def dec(func):
1437 1453 assert stepname not in pulldiscoverymapping
1438 1454 pulldiscoverymapping[stepname] = func
1439 1455 pulldiscoveryorder.append(stepname)
1440 1456 return func
1441 1457 return dec
1442 1458
1443 1459 def _pulldiscovery(pullop):
1444 1460 """Run all discovery steps"""
1445 1461 for stepname in pulldiscoveryorder:
1446 1462 step = pulldiscoverymapping[stepname]
1447 1463 step(pullop)
1448 1464
1449 1465 @pulldiscovery('b1:bookmarks')
1450 1466 def _pullbookmarkbundle1(pullop):
1451 1467 """fetch bookmark data in bundle1 case
1452 1468
1453 1469 If not using bundle2, we have to fetch bookmarks before changeset
1454 1470 discovery to reduce the chance and impact of race conditions."""
1455 1471 if pullop.remotebookmarks is not None:
1456 1472 return
1457 1473 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1458 1474         # all known bundle2 servers now support listkeys, but let's be nice to
1459 1475         # new implementations.
1460 1476 return
1461 1477 books = pullop.remote.listkeys('bookmarks')
1462 1478 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1463 1479
1464 1480
1465 1481 @pulldiscovery('changegroup')
1466 1482 def _pulldiscoverychangegroup(pullop):
1467 1483 """discovery phase for the pull
1468 1484
1469 1485     Currently handles changeset discovery only; will change to handle all
1470 1486     discovery at some point."""
1471 1487 tmp = discovery.findcommonincoming(pullop.repo,
1472 1488 pullop.remote,
1473 1489 heads=pullop.heads,
1474 1490 force=pullop.force)
1475 1491 common, fetch, rheads = tmp
1476 1492 nm = pullop.repo.unfiltered().changelog.nodemap
1477 1493 if fetch and rheads:
1478 1494         # If a remote head is filtered locally, put it back in common.
1479 1495         #
1480 1496         # This is a hackish solution to catch most of the "common but locally
1481 1497         # hidden" situations. We do not perform discovery on the unfiltered
1482 1498         # repository because it ends up doing a pathological number of round
1483 1499         # trips for a huge number of changesets we do not care about.
1484 1500         #
1485 1501         # If a set of such "common but filtered" changesets exists on the server
1486 1502         # but does not include a remote head, we will not be able to detect it.
1487 1503 scommon = set(common)
1488 1504 for n in rheads:
1489 1505 if n in nm:
1490 1506 if n not in scommon:
1491 1507 common.append(n)
1492 1508 if set(rheads).issubset(set(common)):
1493 1509 fetch = []
1494 1510 pullop.common = common
1495 1511 pullop.fetch = fetch
1496 1512 pullop.rheads = rheads
1497 1513
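# For reference, the shape of the discovery result used above (a sketch,
# paraphrasing discovery.findcommonincoming's contract):
#
#   common, fetch, rheads = discovery.findcommonincoming(repo, remote)
#   # common: (at least) the heads of the common subset
#   # fetch:  testable as a boolean, true if anything is incoming; for
#   #         legacy servers it is a list of roots to feed changegroupsubset
#   # rheads: the supplied heads, or else the remote's heads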
1498 1514 def _pullbundle2(pullop):
1499 1515 """pull data using bundle2
1500 1516
1501 1517     For now, the only supported data is the changegroup."""
1502 1518 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1503 1519
1504 1520 # make ui easier to access
1505 1521 ui = pullop.repo.ui
1506 1522
1507 1523 # At the moment we don't do stream clones over bundle2. If that is
1508 1524 # implemented then here's where the check for that will go.
1509 1525 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1510 1526
1511 1527     # declare the pull perimeter (common and heads)
1512 1528 kwargs['common'] = pullop.common
1513 1529 kwargs['heads'] = pullop.heads or pullop.rheads
1514 1530
1515 1531 if streaming:
1516 1532 kwargs['cg'] = False
1517 1533 kwargs['stream'] = True
1518 1534 pullop.stepsdone.add('changegroup')
1519 1535 pullop.stepsdone.add('phases')
1520 1536
1521 1537 else:
1522 1538 # pulling changegroup
1523 1539 pullop.stepsdone.add('changegroup')
1524 1540
1525 1541 kwargs['cg'] = pullop.fetch
1526 1542
1527 1543 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1528 1544 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1529 1545 if (not legacyphase and hasbinaryphase):
1530 1546 kwargs['phases'] = True
1531 1547 pullop.stepsdone.add('phases')
1532 1548
1533 1549 if 'listkeys' in pullop.remotebundle2caps:
1534 1550 if 'phases' not in pullop.stepsdone:
1535 1551 kwargs['listkeys'] = ['phases']
1536 1552
1537 1553 bookmarksrequested = False
1538 1554 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1539 1555 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1540 1556
1541 1557 if pullop.remotebookmarks is not None:
1542 1558 pullop.stepsdone.add('request-bookmarks')
1543 1559
1544 1560 if ('request-bookmarks' not in pullop.stepsdone
1545 1561 and pullop.remotebookmarks is None
1546 1562 and not legacybookmark and hasbinarybook):
1547 1563 kwargs['bookmarks'] = True
1548 1564 bookmarksrequested = True
1549 1565
1550 1566 if 'listkeys' in pullop.remotebundle2caps:
1551 1567 if 'request-bookmarks' not in pullop.stepsdone:
1552 1568             # make sure to always include bookmark data when migrating
1553 1569 # `hg incoming --bundle` to using this function.
1554 1570 pullop.stepsdone.add('request-bookmarks')
1555 1571 kwargs.setdefault('listkeys', []).append('bookmarks')
1556 1572
1557 1573 # If this is a full pull / clone and the server supports the clone bundles
1558 1574 # feature, tell the server whether we attempted a clone bundle. The
1559 1575 # presence of this flag indicates the client supports clone bundles. This
1560 1576 # will enable the server to treat clients that support clone bundles
1561 1577 # differently from those that don't.
1562 1578 if (pullop.remote.capable('clonebundles')
1563 1579 and pullop.heads is None and list(pullop.common) == [nullid]):
1564 1580 kwargs['cbattempted'] = pullop.clonebundleattempted
1565 1581
1566 1582 if streaming:
1567 1583 pullop.repo.ui.status(_('streaming all changes\n'))
1568 1584 elif not pullop.fetch:
1569 1585 pullop.repo.ui.status(_("no changes found\n"))
1570 1586 pullop.cgresult = 0
1571 1587 else:
1572 1588 if pullop.heads is None and list(pullop.common) == [nullid]:
1573 1589 pullop.repo.ui.status(_("requesting all changes\n"))
1574 1590 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1575 1591 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1576 1592 if obsolete.commonversion(remoteversions) is not None:
1577 1593 kwargs['obsmarkers'] = True
1578 1594 pullop.stepsdone.add('obsmarkers')
1579 1595 _pullbundle2extraprepare(pullop, kwargs)
1580 1596 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1581 1597 try:
1582 1598 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction)
1583 1599 op.modes['bookmarks'] = 'records'
1584 1600 bundle2.processbundle(pullop.repo, bundle, op=op)
1585 1601 except bundle2.AbortFromPart as exc:
1586 1602 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1587 1603 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1588 1604 except error.BundleValueError as exc:
1589 1605 raise error.Abort(_('missing support for %s') % exc)
1590 1606
1591 1607 if pullop.fetch:
1592 1608 pullop.cgresult = bundle2.combinechangegroupresults(op)
1593 1609
1594 1610 # processing phases change
1595 1611 for namespace, value in op.records['listkeys']:
1596 1612 if namespace == 'phases':
1597 1613 _pullapplyphases(pullop, value)
1598 1614
1599 1615 # processing bookmark update
1600 1616 if bookmarksrequested:
1601 1617 books = {}
1602 1618 for record in op.records['bookmarks']:
1603 1619 books[record['bookmark']] = record["node"]
1604 1620 pullop.remotebookmarks = books
1605 1621 else:
1606 1622 for namespace, value in op.records['listkeys']:
1607 1623 if namespace == 'bookmarks':
1608 1624 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1609 1625
1610 1626 # bookmark data were either already there or pulled in the bundle
1611 1627 if pullop.remotebookmarks is not None:
1612 1628 _pullbookmarks(pullop)
1613 1629
1614 1630 def _pullbundle2extraprepare(pullop, kwargs):
1615 1631 """hook function so that extensions can extend the getbundle call"""
1616 1632
1617 1633 def _pullchangeset(pullop):
1618 1634 """pull changeset from unbundle into the local repo"""
1619 1635     # We delay opening the transaction as long as possible so we
1620 1636     # don't open a transaction for nothing and don't break future useful
1621 1637     # rollback calls
1622 1638 if 'changegroup' in pullop.stepsdone:
1623 1639 return
1624 1640 pullop.stepsdone.add('changegroup')
1625 1641 if not pullop.fetch:
1626 1642 pullop.repo.ui.status(_("no changes found\n"))
1627 1643 pullop.cgresult = 0
1628 1644 return
1629 1645 tr = pullop.gettransaction()
1630 1646 if pullop.heads is None and list(pullop.common) == [nullid]:
1631 1647 pullop.repo.ui.status(_("requesting all changes\n"))
1632 1648 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1633 1649 # issue1320, avoid a race if remote changed after discovery
1634 1650 pullop.heads = pullop.rheads
1635 1651
1636 1652 if pullop.remote.capable('getbundle'):
1637 1653 # TODO: get bundlecaps from remote
1638 1654 cg = pullop.remote.getbundle('pull', common=pullop.common,
1639 1655 heads=pullop.heads or pullop.rheads)
1640 1656 elif pullop.heads is None:
1641 1657 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1642 1658 elif not pullop.remote.capable('changegroupsubset'):
1643 1659 raise error.Abort(_("partial pull cannot be done because "
1644 1660 "other repository doesn't support "
1645 1661 "changegroupsubset."))
1646 1662 else:
1647 1663 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1648 1664 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1649 1665 pullop.remote.url())
1650 1666 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1651 1667
1652 1668 def _pullphase(pullop):
1653 1669 # Get remote phases data from remote
1654 1670 if 'phases' in pullop.stepsdone:
1655 1671 return
1656 1672 remotephases = pullop.remote.listkeys('phases')
1657 1673 _pullapplyphases(pullop, remotephases)
1658 1674
1659 1675 def _pullapplyphases(pullop, remotephases):
1660 1676 """apply phase movement from observed remote state"""
1661 1677 if 'phases' in pullop.stepsdone:
1662 1678 return
1663 1679 pullop.stepsdone.add('phases')
1664 1680 publishing = bool(remotephases.get('publishing', False))
1665 1681 if remotephases and not publishing:
1666 1682 # remote is new and non-publishing
1667 1683 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1668 1684 pullop.pulledsubset,
1669 1685 remotephases)
1670 1686 dheads = pullop.pulledsubset
1671 1687 else:
1672 1688         # Remote is old or publishing; all common changesets
1673 1689         # should be seen as public
1674 1690 pheads = pullop.pulledsubset
1675 1691 dheads = []
1676 1692 unfi = pullop.repo.unfiltered()
1677 1693 phase = unfi._phasecache.phase
1678 1694 rev = unfi.changelog.nodemap.get
1679 1695 public = phases.public
1680 1696 draft = phases.draft
1681 1697
1682 1698 # exclude changesets already public locally and update the others
1683 1699 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1684 1700 if pheads:
1685 1701 tr = pullop.gettransaction()
1686 1702 phases.advanceboundary(pullop.repo, tr, public, pheads)
1687 1703
1688 1704 # exclude changesets already draft locally and update the others
1689 1705 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1690 1706 if dheads:
1691 1707 tr = pullop.gettransaction()
1692 1708 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1693 1709
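# Illustrative shapes of the ``remotephases`` argument (the hex node is
# hypothetical and abbreviated):
#
#   {'publishing': 'True'}      # publishing server: everything seen as public
#   {'6c0482d977a3...': '1'}    # non-publishing: draft roots mapped to the
#                               # draft phase number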
1694 1710 def _pullbookmarks(pullop):
1695 1711 """process the remote bookmark information to update the local one"""
1696 1712 if 'bookmarks' in pullop.stepsdone:
1697 1713 return
1698 1714 pullop.stepsdone.add('bookmarks')
1699 1715 repo = pullop.repo
1700 1716 remotebookmarks = pullop.remotebookmarks
1701 1717 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1702 1718 pullop.remote.url(),
1703 1719 pullop.gettransaction,
1704 1720 explicit=pullop.explicitbookmarks)
1705 1721
1706 1722 def _pullobsolete(pullop):
1707 1723 """utility function to pull obsolete markers from a remote
1708 1724
1709 1725     ``gettransaction`` is a function that returns the pull transaction, creating
1710 1726     one if necessary. We return the transaction to inform the calling code that
1711 1727     a new transaction has been created (when applicable).
1712 1728
1713 1729     Exists mostly to allow overriding for experimentation purposes."""
1714 1730 if 'obsmarkers' in pullop.stepsdone:
1715 1731 return
1716 1732 pullop.stepsdone.add('obsmarkers')
1717 1733 tr = None
1718 1734 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1719 1735 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1720 1736 remoteobs = pullop.remote.listkeys('obsolete')
1721 1737 if 'dump0' in remoteobs:
1722 1738 tr = pullop.gettransaction()
1723 1739 markers = []
1724 1740 for key in sorted(remoteobs, reverse=True):
1725 1741 if key.startswith('dump'):
1726 1742 data = util.b85decode(remoteobs[key])
1727 1743 version, newmarks = obsolete._readmarkers(data)
1728 1744 markers += newmarks
1729 1745 if markers:
1730 1746 pullop.repo.obsstore.add(tr, markers)
1731 1747 pullop.repo.invalidatevolatilesets()
1732 1748 return tr
1733 1749
1734 1750 def caps20to10(repo, role):
1735 1751 """return a set with appropriate options to use bundle20 during getbundle"""
1736 1752 caps = {'HG20'}
1737 1753 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
1738 1754 caps.add('bundle2=' + urlreq.quote(capsblob))
1739 1755 return caps
1740 1756
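# Example of the returned set (a sketch; the blob content is abbreviated):
#
#   caps20to10(repo, role='client')
#   # -> {'HG20', 'bundle2=<URL-quoted bundle2 capabilities blob>'}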
1741 1757 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1742 1758 getbundle2partsorder = []
1743 1759
1744 1760 # Mapping between step name and function
1745 1761 #
1746 1762 # This exists to help extensions wrap steps if necessary
1747 1763 getbundle2partsmapping = {}
1748 1764
1749 1765 def getbundle2partsgenerator(stepname, idx=None):
1750 1766 """decorator for function generating bundle2 part for getbundle
1751 1767
1752 1768 The function is added to the step -> function mapping and appended to the
1753 1769 list of steps. Beware that decorated functions will be added in order
1754 1770 (this may matter).
1755 1771
1756 1772     You can only use this decorator for new steps; if you want to wrap a step
1757 1773     from an extension, modify the getbundle2partsmapping dictionary directly."""
1758 1774 def dec(func):
1759 1775 assert stepname not in getbundle2partsmapping
1760 1776 getbundle2partsmapping[stepname] = func
1761 1777 if idx is None:
1762 1778 getbundle2partsorder.append(stepname)
1763 1779 else:
1764 1780 getbundle2partsorder.insert(idx, stepname)
1765 1781 return func
1766 1782 return dec
1767 1783
1768 1784 def bundle2requested(bundlecaps):
1769 1785 if bundlecaps is not None:
1770 1786 return any(cap.startswith('HG2') for cap in bundlecaps)
1771 1787 return False
1772 1788
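# For instance (a sketch of the expected behavior):
#
#   bundle2requested(['HG20'])    # -> True
#   bundle2requested(['HG10GZ'])  # -> False
#   bundle2requested(None)        # -> False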
1773 1789 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1774 1790 **kwargs):
1775 1791 """Return chunks constituting a bundle's raw data.
1776 1792
1777 1793 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1778 1794 passed.
1779 1795
1780 1796 Returns a 2-tuple of a dict with metadata about the generated bundle
1781 1797 and an iterator over raw chunks (of varying sizes).
1782 1798 """
1783 1799 kwargs = pycompat.byteskwargs(kwargs)
1784 1800 info = {}
1785 1801 usebundle2 = bundle2requested(bundlecaps)
1786 1802 # bundle10 case
1787 1803 if not usebundle2:
1788 1804 if bundlecaps and not kwargs.get('cg', True):
1789 1805 raise ValueError(_('request for bundle10 must include changegroup'))
1790 1806
1791 1807 if kwargs:
1792 1808 raise ValueError(_('unsupported getbundle arguments: %s')
1793 1809 % ', '.join(sorted(kwargs.keys())))
1794 1810 outgoing = _computeoutgoing(repo, heads, common)
1795 1811 info['bundleversion'] = 1
1796 1812 return info, changegroup.makestream(repo, outgoing, '01', source,
1797 1813 bundlecaps=bundlecaps)
1798 1814
1799 1815 # bundle20 case
1800 1816 info['bundleversion'] = 2
1801 1817 b2caps = {}
1802 1818 for bcaps in bundlecaps:
1803 1819 if bcaps.startswith('bundle2='):
1804 1820 blob = urlreq.unquote(bcaps[len('bundle2='):])
1805 1821 b2caps.update(bundle2.decodecaps(blob))
1806 1822 bundler = bundle2.bundle20(repo.ui, b2caps)
1807 1823
1808 1824 kwargs['heads'] = heads
1809 1825 kwargs['common'] = common
1810 1826
1811 1827 for name in getbundle2partsorder:
1812 1828 func = getbundle2partsmapping[name]
1813 1829 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1814 1830 **pycompat.strkwargs(kwargs))
1815 1831
1816 1832 info['prefercompressed'] = bundler.prefercompressed
1817 1833
1818 1834 return info, bundler.getchunks()
1819 1835
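# Illustrative caller (a sketch; the output path is hypothetical):
#
#   info, chunks = getbundlechunks(repo, 'bundle', heads=repo.heads(),
#                                  common=[nullid], bundlecaps={'HG20'})
#   with open('/tmp/out.hg', 'wb') as fh:
#       for chunk in chunks:
#           fh.write(chunk)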
1820 1836 @getbundle2partsgenerator('stream2')
1821 1837 def _getbundlestream2(bundler, repo, *args, **kwargs):
1822 1838 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
1823 1839
1824 1840 @getbundle2partsgenerator('changegroup')
1825 1841 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1826 1842 b2caps=None, heads=None, common=None, **kwargs):
1827 1843 """add a changegroup part to the requested bundle"""
1828 1844 cgstream = None
1829 1845 if kwargs.get(r'cg', True):
1830 1846 # build changegroup bundle here.
1831 1847 version = '01'
1832 1848 cgversions = b2caps.get('changegroup')
1833 1849 if cgversions: # 3.1 and 3.2 ship with an empty value
1834 1850 cgversions = [v for v in cgversions
1835 1851 if v in changegroup.supportedoutgoingversions(repo)]
1836 1852 if not cgversions:
1837 1853 raise ValueError(_('no common changegroup version'))
1838 1854 version = max(cgversions)
1839 1855 outgoing = _computeoutgoing(repo, heads, common)
1840 1856 if outgoing.missing:
1841 1857 cgstream = changegroup.makestream(repo, outgoing, version, source,
1842 1858 bundlecaps=bundlecaps)
1843 1859
1844 1860 if cgstream:
1845 1861 part = bundler.newpart('changegroup', data=cgstream)
1846 1862 if cgversions:
1847 1863 part.addparam('version', version)
1848 1864 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1849 1865 mandatory=False)
1850 1866 if 'treemanifest' in repo.requirements:
1851 1867 part.addparam('treemanifest', '1')
1852 1868
1853 1869 @getbundle2partsgenerator('bookmarks')
1854 1870 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1855 1871 b2caps=None, **kwargs):
1856 1872 """add a bookmark part to the requested bundle"""
1857 1873 if not kwargs.get(r'bookmarks', False):
1858 1874 return
1859 1875 if 'bookmarks' not in b2caps:
1860 1876 raise ValueError(_('no common bookmarks exchange method'))
1861 1877 books = bookmod.listbinbookmarks(repo)
1862 1878 data = bookmod.binaryencode(books)
1863 1879 if data:
1864 1880 bundler.newpart('bookmarks', data=data)
1865 1881
1866 1882 @getbundle2partsgenerator('listkeys')
1867 1883 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1868 1884 b2caps=None, **kwargs):
1869 1885 """add parts containing listkeys namespaces to the requested bundle"""
1870 1886 listkeys = kwargs.get(r'listkeys', ())
1871 1887 for namespace in listkeys:
1872 1888 part = bundler.newpart('listkeys')
1873 1889 part.addparam('namespace', namespace)
1874 1890 keys = repo.listkeys(namespace).items()
1875 1891 part.data = pushkey.encodekeys(keys)
1876 1892
1877 1893 @getbundle2partsgenerator('obsmarkers')
1878 1894 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1879 1895 b2caps=None, heads=None, **kwargs):
1880 1896 """add an obsolescence markers part to the requested bundle"""
1881 1897 if kwargs.get(r'obsmarkers', False):
1882 1898 if heads is None:
1883 1899 heads = repo.heads()
1884 1900 subset = [c.node() for c in repo.set('::%ln', heads)]
1885 1901 markers = repo.obsstore.relevantmarkers(subset)
1886 1902 markers = sorted(markers)
1887 1903 bundle2.buildobsmarkerspart(bundler, markers)
1888 1904
1889 1905 @getbundle2partsgenerator('phases')
1890 1906 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1891 1907 b2caps=None, heads=None, **kwargs):
1892 1908 """add phase heads part to the requested bundle"""
1893 1909 if kwargs.get(r'phases', False):
1894 1910         if 'heads' not in b2caps.get('phases'):
1895 1911 raise ValueError(_('no common phases exchange method'))
1896 1912 if heads is None:
1897 1913 heads = repo.heads()
1898 1914
1899 1915 headsbyphase = collections.defaultdict(set)
1900 1916 if repo.publishing():
1901 1917 headsbyphase[phases.public] = heads
1902 1918 else:
1903 1919 # find the appropriate heads to move
1904 1920
1905 1921 phase = repo._phasecache.phase
1906 1922 node = repo.changelog.node
1907 1923 rev = repo.changelog.rev
1908 1924 for h in heads:
1909 1925 headsbyphase[phase(repo, rev(h))].add(h)
1910 1926 seenphases = list(headsbyphase.keys())
1911 1927
1912 1928             # We do not handle anything but public and draft phases for now
1913 1929 if seenphases:
1914 1930 assert max(seenphases) <= phases.draft
1915 1931
1916 1932 # if client is pulling non-public changesets, we need to find
1917 1933 # intermediate public heads.
1918 1934 draftheads = headsbyphase.get(phases.draft, set())
1919 1935 if draftheads:
1920 1936 publicheads = headsbyphase.get(phases.public, set())
1921 1937
1922 1938 revset = 'heads(only(%ln, %ln) and public())'
1923 1939 extraheads = repo.revs(revset, draftheads, publicheads)
1924 1940 for r in extraheads:
1925 1941 headsbyphase[phases.public].add(node(r))
1926 1942
1927 1943 # transform data in a format used by the encoding function
1928 1944 phasemapping = []
1929 1945 for phase in phases.allphases:
1930 1946 phasemapping.append(sorted(headsbyphase[phase]))
1931 1947
1932 1948 # generate the actual part
1933 1949 phasedata = phases.binaryencode(phasemapping)
1934 1950 bundler.newpart('phase-heads', data=phasedata)
1935 1951
1936 1952 @getbundle2partsgenerator('hgtagsfnodes')
1937 1953 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1938 1954 b2caps=None, heads=None, common=None,
1939 1955 **kwargs):
1940 1956 """Transfer the .hgtags filenodes mapping.
1941 1957
1942 1958 Only values for heads in this bundle will be transferred.
1943 1959
1944 1960 The part data consists of pairs of 20 byte changeset node and .hgtags
1945 1961 filenodes raw values.
1946 1962 """
1947 1963 # Don't send unless:
1948 1964     # - changesets are being exchanged,
1949 1965 # - the client supports it.
1950 1966 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
1951 1967 return
1952 1968
1953 1969 outgoing = _computeoutgoing(repo, heads, common)
1954 1970 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1955 1971
1956 1972 @getbundle2partsgenerator('cache:rev-branch-cache')
1957 1973 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
1958 1974 b2caps=None, heads=None, common=None,
1959 1975 **kwargs):
1960 1976 """Transfer the rev-branch-cache mapping
1961 1977
1962 1978 The payload is a series of data related to each branch
1963 1979
1964 1980 1) branch name length
1965 1981 2) number of open heads
1966 1982 3) number of closed heads
1967 1983 4) open heads nodes
1968 1984 5) closed heads nodes
1969 1985 """
1970 1986 # Don't send unless:
1971 1987     # - changesets are being exchanged,
1972 1988 # - the client supports it.
1973 1989 if not (kwargs.get(r'cg', True)) or 'rev-branch-cache' not in b2caps:
1974 1990 return
1975 1991 outgoing = _computeoutgoing(repo, heads, common)
1976 1992 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
1977 1993
1978 1994 def check_heads(repo, their_heads, context):
1979 1995 """check if the heads of a repo have been modified
1980 1996
1981 1997 Used by peer for unbundling.
1982 1998 """
1983 1999 heads = repo.heads()
1984 2000 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1985 2001 if not (their_heads == ['force'] or their_heads == heads or
1986 2002 their_heads == ['hashed', heads_hash]):
1987 2003 # someone else committed/pushed/unbundled while we
1988 2004 # were transferring data
1989 2005 raise error.PushRaced('repository changed while %s - '
1990 2006 'please try again' % context)
1991 2007
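# Illustrative client-side counterpart (a sketch): a wire protocol client can
# send either the literal heads or a hash computed the same way as
# ``heads_hash`` above:
#
#   h = hashlib.sha1(''.join(sorted(repo.heads()))).digest()
#   their_heads = ['hashed', h]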
1992 2008 def unbundle(repo, cg, heads, source, url):
1993 2009 """Apply a bundle to a repo.
1994 2010
1995 2011     This function makes sure the repo is locked during the application and has
1996 2012     a mechanism to check that no push race occurred between the creation of the
1997 2013     bundle and its application.
1998 2014
1999 2015     If the push was raced, a PushRaced exception is raised."""
2000 2016 r = 0
2001 2017 # need a transaction when processing a bundle2 stream
2002 2018 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2003 2019 lockandtr = [None, None, None]
2004 2020 recordout = None
2005 2021 # quick fix for output mismatch with bundle2 in 3.4
2006 2022 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2007 2023 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2008 2024 captureoutput = True
2009 2025 try:
2010 2026 # note: outside bundle1, 'heads' is expected to be empty and this
2011 2027         # 'check_heads' call will be a no-op
2012 2028 check_heads(repo, heads, 'uploading changes')
2013 2029 # push can proceed
2014 2030 if not isinstance(cg, bundle2.unbundle20):
2015 2031 # legacy case: bundle1 (changegroup 01)
2016 2032 txnname = "\n".join([source, util.hidepassword(url)])
2017 2033 with repo.lock(), repo.transaction(txnname) as tr:
2018 2034 op = bundle2.applybundle(repo, cg, tr, source, url)
2019 2035 r = bundle2.combinechangegroupresults(op)
2020 2036 else:
2021 2037 r = None
2022 2038 try:
2023 2039 def gettransaction():
2024 2040 if not lockandtr[2]:
2025 2041 lockandtr[0] = repo.wlock()
2026 2042 lockandtr[1] = repo.lock()
2027 2043 lockandtr[2] = repo.transaction(source)
2028 2044 lockandtr[2].hookargs['source'] = source
2029 2045 lockandtr[2].hookargs['url'] = url
2030 2046 lockandtr[2].hookargs['bundle2'] = '1'
2031 2047 return lockandtr[2]
2032 2048
2033 2049 # Do greedy locking by default until we're satisfied with lazy
2034 2050 # locking.
2035 2051 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2036 2052 gettransaction()
2037 2053
2038 2054 op = bundle2.bundleoperation(repo, gettransaction,
2039 2055 captureoutput=captureoutput)
2040 2056 try:
2041 2057 op = bundle2.processbundle(repo, cg, op=op)
2042 2058 finally:
2043 2059 r = op.reply
2044 2060 if captureoutput and r is not None:
2045 2061 repo.ui.pushbuffer(error=True, subproc=True)
2046 2062 def recordout(output):
2047 2063 r.newpart('output', data=output, mandatory=False)
2048 2064 if lockandtr[2] is not None:
2049 2065 lockandtr[2].close()
2050 2066 except BaseException as exc:
2051 2067 exc.duringunbundle2 = True
2052 2068 if captureoutput and r is not None:
2053 2069 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2054 2070 def recordout(output):
2055 2071 part = bundle2.bundlepart('output', data=output,
2056 2072 mandatory=False)
2057 2073 parts.append(part)
2058 2074 raise
2059 2075 finally:
2060 2076 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2061 2077 if recordout is not None:
2062 2078 recordout(repo.ui.popbuffer())
2063 2079 return r
2064 2080
2065 2081 def _maybeapplyclonebundle(pullop):
2066 2082 """Apply a clone bundle from a remote, if possible."""
2067 2083
2068 2084 repo = pullop.repo
2069 2085 remote = pullop.remote
2070 2086
2071 2087 if not repo.ui.configbool('ui', 'clonebundles'):
2072 2088 return
2073 2089
2074 2090 # Only run if local repo is empty.
2075 2091 if len(repo):
2076 2092 return
2077 2093
2078 2094 if pullop.heads:
2079 2095 return
2080 2096
2081 2097 if not remote.capable('clonebundles'):
2082 2098 return
2083 2099
2084 2100 res = remote._call('clonebundles')
2085 2101
2086 2102 # If we call the wire protocol command, that's good enough to record the
2087 2103 # attempt.
2088 2104 pullop.clonebundleattempted = True
2089 2105
2090 2106 entries = parseclonebundlesmanifest(repo, res)
2091 2107 if not entries:
2092 2108 repo.ui.note(_('no clone bundles available on remote; '
2093 2109 'falling back to regular clone\n'))
2094 2110 return
2095 2111
2096 2112 entries = filterclonebundleentries(
2097 2113 repo, entries, streamclonerequested=pullop.streamclonerequested)
2098 2114
2099 2115 if not entries:
2100 2116 # There is a thundering herd concern here. However, if a server
2101 2117 # operator doesn't advertise bundles appropriate for its clients,
2102 2118 # they deserve what's coming. Furthermore, from a client's
2103 2119 # perspective, no automatic fallback would mean not being able to
2104 2120 # clone!
2105 2121 repo.ui.warn(_('no compatible clone bundles available on server; '
2106 2122 'falling back to regular clone\n'))
2107 2123 repo.ui.warn(_('(you may want to report this to the server '
2108 2124 'operator)\n'))
2109 2125 return
2110 2126
2111 2127 entries = sortclonebundleentries(repo.ui, entries)
2112 2128
2113 2129 url = entries[0]['URL']
2114 2130 repo.ui.status(_('applying clone bundle from %s\n') % url)
2115 2131 if trypullbundlefromurl(repo.ui, repo, url):
2116 2132 repo.ui.status(_('finished applying clone bundle\n'))
2117 2133 # Bundle failed.
2118 2134 #
2119 2135 # We abort by default to avoid the thundering herd of
2120 2136 # clients flooding a server that was expecting expensive
2121 2137 # clone load to be offloaded.
2122 2138 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2123 2139 repo.ui.warn(_('falling back to normal clone\n'))
2124 2140 else:
2125 2141 raise error.Abort(_('error applying bundle'),
2126 2142 hint=_('if this error persists, consider contacting '
2127 2143 'the server operator or disable clone '
2128 2144 'bundles via '
2129 2145 '"--config ui.clonebundles=false"'))
2130 2146
2131 2147 def parseclonebundlesmanifest(repo, s):
2132 2148 """Parses the raw text of a clone bundles manifest.
2133 2149
2134 2150 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2135 2151 to the URL and other keys are the attributes for the entry.
2136 2152 """
2137 2153 m = []
2138 2154 for line in s.splitlines():
2139 2155 fields = line.split()
2140 2156 if not fields:
2141 2157 continue
2142 2158 attrs = {'URL': fields[0]}
2143 2159 for rawattr in fields[1:]:
2144 2160 key, value = rawattr.split('=', 1)
2145 2161 key = urlreq.unquote(key)
2146 2162 value = urlreq.unquote(value)
2147 2163 attrs[key] = value
2148 2164
2149 2165 # Parse BUNDLESPEC into components. This makes client-side
2150 2166 # preferences easier to specify since you can prefer a single
2151 2167 # component of the BUNDLESPEC.
2152 2168 if key == 'BUNDLESPEC':
2153 2169 try:
2154 2170 bundlespec = parsebundlespec(repo, value,
2155 2171 externalnames=True)
2156 2172 attrs['COMPRESSION'] = bundlespec.compression
2157 2173 attrs['VERSION'] = bundlespec.version
2158 2174 except error.InvalidBundleSpecification:
2159 2175 pass
2160 2176 except error.UnsupportedBundleSpecification:
2161 2177 pass
2162 2178
2163 2179 m.append(attrs)
2164 2180
2165 2181 return m
2166 2182
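# Example (URL and attribute values hypothetical): the manifest line
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# parses to
#
#   [{'URL': 'https://example.com/full.hg',
#     'BUNDLESPEC': 'gzip-v2',
#     'COMPRESSION': 'gzip',
#     'VERSION': 'v2',
#     'REQUIRESNI': 'true'}]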
2167 2183 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2168 2184 """Remove incompatible clone bundle manifest entries.
2169 2185
2170 2186 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2171 2187 and returns a new list consisting of only the entries that this client
2172 2188 should be able to apply.
2173 2189
2174 2190 There is no guarantee we'll be able to apply all returned entries because
2175 2191 the metadata we use to filter on may be missing or wrong.
2176 2192 """
2177 2193 newentries = []
2178 2194 for entry in entries:
2179 2195 spec = entry.get('BUNDLESPEC')
2180 2196 if spec:
2181 2197 try:
2182 2198 bundlespec = parsebundlespec(repo, spec, strict=True)
2183 2199
2184 2200 # If a stream clone was requested, filter out non-streamclone
2185 2201 # entries.
2186 2202 comp = bundlespec.compression
2187 2203 version = bundlespec.version
2188 2204 if streamclonerequested and (comp != 'UN' or version != 's1'):
2189 2205 repo.ui.debug('filtering %s because not a stream clone\n' %
2190 2206 entry['URL'])
2191 2207 continue
2192 2208
2193 2209 except error.InvalidBundleSpecification as e:
2194 2210 repo.ui.debug(str(e) + '\n')
2195 2211 continue
2196 2212 except error.UnsupportedBundleSpecification as e:
2197 2213 repo.ui.debug('filtering %s because unsupported bundle '
2198 2214 'spec: %s\n' % (
2199 2215 entry['URL'], stringutil.forcebytestr(e)))
2200 2216 continue
2201 2217 # If we don't have a spec and requested a stream clone, we don't know
2202 2218 # what the entry is so don't attempt to apply it.
2203 2219 elif streamclonerequested:
2204 2220 repo.ui.debug('filtering %s because cannot determine if a stream '
2205 2221 'clone bundle\n' % entry['URL'])
2206 2222 continue
2207 2223
2208 2224 if 'REQUIRESNI' in entry and not sslutil.hassni:
2209 2225 repo.ui.debug('filtering %s because SNI not supported\n' %
2210 2226 entry['URL'])
2211 2227 continue
2212 2228
2213 2229 newentries.append(entry)
2214 2230
2215 2231 return newentries
2216 2232
2217 2233 class clonebundleentry(object):
2218 2234 """Represents an item in a clone bundles manifest.
2219 2235
2220 2236 This rich class is needed to support sorting since sorted() in Python 3
2221 2237 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2222 2238 won't work.
2223 2239 """
2224 2240
2225 2241 def __init__(self, value, prefers):
2226 2242 self.value = value
2227 2243 self.prefers = prefers
2228 2244
2229 2245 def _cmp(self, other):
2230 2246 for prefkey, prefvalue in self.prefers:
2231 2247 avalue = self.value.get(prefkey)
2232 2248 bvalue = other.value.get(prefkey)
2233 2249
2234 2250 # Special case for b missing attribute and a matches exactly.
2235 2251 if avalue is not None and bvalue is None and avalue == prefvalue:
2236 2252 return -1
2237 2253
2238 2254 # Special case for a missing attribute and b matches exactly.
2239 2255 if bvalue is not None and avalue is None and bvalue == prefvalue:
2240 2256 return 1
2241 2257
2242 2258 # We can't compare unless attribute present on both.
2243 2259 if avalue is None or bvalue is None:
2244 2260 continue
2245 2261
2246 2262 # Same values should fall back to next attribute.
2247 2263 if avalue == bvalue:
2248 2264 continue
2249 2265
2250 2266 # Exact matches come first.
2251 2267 if avalue == prefvalue:
2252 2268 return -1
2253 2269 if bvalue == prefvalue:
2254 2270 return 1
2255 2271
2256 2272 # Fall back to next attribute.
2257 2273 continue
2258 2274
2259 2275 # If we got here we couldn't sort by attributes and prefers. Fall
2260 2276 # back to index order.
2261 2277 return 0
2262 2278
2263 2279 def __lt__(self, other):
2264 2280 return self._cmp(other) < 0
2265 2281
2266 2282 def __gt__(self, other):
2267 2283 return self._cmp(other) > 0
2268 2284
2269 2285 def __eq__(self, other):
2270 2286 return self._cmp(other) == 0
2271 2287
2272 2288 def __le__(self, other):
2273 2289 return self._cmp(other) <= 0
2274 2290
2275 2291 def __ge__(self, other):
2276 2292 return self._cmp(other) >= 0
2277 2293
2278 2294 def __ne__(self, other):
2279 2295 return self._cmp(other) != 0
2280 2296
2281 2297 def sortclonebundleentries(ui, entries):
2282 2298 prefers = ui.configlist('ui', 'clonebundleprefers')
2283 2299 if not prefers:
2284 2300 return list(entries)
2285 2301
2286 2302 prefers = [p.split('=', 1) for p in prefers]
2287 2303
2288 2304 items = sorted(clonebundleentry(v, prefers) for v in entries)
2289 2305 return [i.value for i in items]
2290 2306
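# Example (a sketch): with
#
#   [ui]
#   clonebundleprefers = COMPRESSION=zstd, COMPRESSION=gzip
#
# entries whose COMPRESSION attribute equals 'zstd' sort first, then 'gzip',
# with remaining ties kept in manifest order.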
2291 2307 def trypullbundlefromurl(ui, repo, url):
2292 2308 """Attempt to apply a bundle from a URL."""
2293 2309 with repo.lock(), repo.transaction('bundleurl') as tr:
2294 2310 try:
2295 2311 fh = urlmod.open(ui, url)
2296 2312 cg = readbundle(ui, fh, 'stream')
2297 2313
2298 2314 if isinstance(cg, streamclone.streamcloneapplier):
2299 2315 cg.apply(repo)
2300 2316 else:
2301 2317 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2302 2318 return True
2303 2319 except urlerr.httperror as e:
2304 2320 ui.warn(_('HTTP error fetching bundle: %s\n') %
2305 2321 stringutil.forcebytestr(e))
2306 2322 except urlerr.urlerror as e:
2307 2323 ui.warn(_('error fetching bundle: %s\n') %
2308 2324 stringutil.forcebytestr(e.reason))
2309 2325
2310 2326 return False
@@ -1,55 +1,48 b''
1 1 Test creating and consuming a stream bundle v2
2 2
3 3 $ getmainid() {
4 4 > hg -R main log --template '{node}\n' --rev "$1"
5 5 > }
6 6
7 7 $ cp $HGRCPATH $TESTTMP/hgrc.orig
8 8
9 9 $ cat >> $HGRCPATH << EOF
10 10 > [experimental]
11 11 > evolution.createmarkers=True
12 12 > evolution.exchange=True
13 13 > bundle2-output-capture=True
14 14 > [ui]
15 15 > ssh="$PYTHON" "$TESTDIR/dummyssh"
16 16 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
17 17 > [web]
18 18 > push_ssl = false
19 19 > allow_push = *
20 20 > [phases]
21 21 > publish=False
22 22 > [extensions]
23 23 > drawdag=$TESTDIR/drawdag.py
24 24 > EOF
25 25
26 26 The extension requires a repo (currently unused)
27 27
28 28 $ hg init main
29 29 $ cd main
30 30
31 31 $ hg debugdrawdag <<'EOF'
32 32 > E
33 33 > |
34 34 > D
35 35 > |
36 36 > C
37 37 > |
38 38 > B
39 39 > |
40 40 > A
41 41 > EOF
42 42
43 43 $ hg bundle -a --type="none-v2;stream=v2" bundle.hg
44 5 changesets found
45 44 $ hg debugbundle bundle.hg
46 45 Stream params: {}
47 changegroup -- {nbchanges: 5, version: 02}
48 426bada5c67598ca65036d57d9e4b64b0c1ce7a0
49 112478962961147124edd43549aedd1a335e44bf
50 26805aba1e600a82e93661149f2313866a221a7b
51 f585351a92f85104bff7c284233c338b10eb1df7
52 9bc730a19041f9ec7cb33c626e811aa233efb18c
53 cache:rev-branch-cache -- {}
46 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Cstore}
54 47 $ hg debugbundle --spec bundle.hg
55 none-v2
48 none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Cstore