streamclonebundle: make sure we accept new stream clone bundle spec...
Boris Feld
r37187:b837655c default
@@ -1,2326 +1,2336 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 nullid,
19 19 )
20 20 from .thirdparty import (
21 21 attr,
22 22 )
23 23 from . import (
24 24 bookmarks as bookmod,
25 25 bundle2,
26 26 changegroup,
27 27 discovery,
28 28 error,
29 29 lock as lockmod,
30 30 logexchange,
31 31 obsolete,
32 32 phases,
33 33 pushkey,
34 34 pycompat,
35 35 scmutil,
36 36 sslutil,
37 37 streamclone,
38 38 url as urlmod,
39 39 util,
40 40 )
41 41 from .utils import (
42 42 stringutil,
43 43 )
44 44
45 45 urlerr = util.urlerr
46 46 urlreq = util.urlreq
47 47
48 48 # Maps bundle version human names to changegroup versions.
49 49 _bundlespeccgversions = {'v1': '01',
50 50 'v2': '02',
51 51 'packed1': 's1',
52 52 'bundle2': '02', #legacy
53 53 }
54 54
55 55 # Maps bundle version with content opts to choose which part to bundle
56 56 _bundlespeccontentopts = {
57 57 'v1': {
58 58 'changegroup': True,
59 59 'cg.version': '01',
60 60 'obsolescence': False,
61 61 'phases': False,
62 62 'tagsfnodescache': False,
63 63 'revbranchcache': False
64 64 },
65 65 'v2': {
66 66 'changegroup': True,
67 67 'cg.version': '02',
68 68 'obsolescence': False,
69 69 'phases': False,
70 70 'tagsfnodescache': True,
71 71 'revbranchcache': True
72 72 },
73 73 'packed1' : {
74 74 'cg.version': 's1'
75 75 }
76 76 }
77 77 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
78 78
79 79 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
80 80 "tagsfnodescache": False,
81 81 "revbranchcache": False}}
82 82
83 83 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
84 84 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
85 85
86 86 @attr.s
87 87 class bundlespec(object):
88 88 compression = attr.ib()
89 89 version = attr.ib()
90 90 params = attr.ib()
91 91 contentopts = attr.ib()
92 92
93 93 def parsebundlespec(repo, spec, strict=True, externalnames=False):
94 94 """Parse a bundle string specification into parts.
95 95
96 96 Bundle specifications denote a well-defined bundle/exchange format.
97 97 The content of a given specification should not change over time in
98 98 order to ensure that bundles produced by a newer version of Mercurial are
99 99 readable from an older version.
100 100
101 101 The string currently has the form:
102 102
103 103 <compression>-<type>[;<parameter0>[;<parameter1>]]
104 104
105 105 Where <compression> is one of the supported compression formats
106 106 and <type> is (currently) a version string. A ";" can follow the type and
107 107 all text afterwards is interpreted as URI encoded, ";" delimited key=value
108 108 pairs.
109 109
110 110 If ``strict`` is True (the default) <compression> is required. Otherwise,
111 111 it is optional.
112 112
113 113 If ``externalnames`` is False (the default), the human-centric names will
114 114 be converted to their internal representation.
115 115
116 116 Returns a bundlespec object of (compression, version, parameters).
117 117 Compression will be ``None`` if not in strict mode and a compression isn't
118 118 defined.
119 119
120 120 An ``InvalidBundleSpecification`` is raised when the specification is
121 121 not syntactically well formed.
122 122
123 123 An ``UnsupportedBundleSpecification`` is raised when the compression or
124 124 bundle type/version is not recognized.
125 125
126 126 Note: this function will likely eventually return a more complex data
127 127 structure, including bundle2 part information.
128 128 """
129 129 def parseparams(s):
130 130 if ';' not in s:
131 131 return s, {}
132 132
133 133 params = {}
134 134 version, paramstr = s.split(';', 1)
135 135
136 136 for p in paramstr.split(';'):
137 137 if '=' not in p:
138 138 raise error.InvalidBundleSpecification(
139 139 _('invalid bundle specification: '
140 140 'missing "=" in parameter: %s') % p)
141 141
142 142 key, value = p.split('=', 1)
143 143 key = urlreq.unquote(key)
144 144 value = urlreq.unquote(value)
145 145 params[key] = value
146 146
147 147 return version, params
148 148
149 149
150 150 if strict and '-' not in spec:
151 151 raise error.InvalidBundleSpecification(
152 152 _('invalid bundle specification; '
153 153 'must be prefixed with compression: %s') % spec)
154 154
155 155 if '-' in spec:
156 156 compression, version = spec.split('-', 1)
157 157
158 158 if compression not in util.compengines.supportedbundlenames:
159 159 raise error.UnsupportedBundleSpecification(
160 160 _('%s compression is not supported') % compression)
161 161
162 162 version, params = parseparams(version)
163 163
164 164 if version not in _bundlespeccgversions:
165 165 raise error.UnsupportedBundleSpecification(
166 166 _('%s is not a recognized bundle version') % version)
167 167 else:
168 168 # Value could be just the compression or just the version, in which
169 169 # case some defaults are assumed (but only when not in strict mode).
170 170 assert not strict
171 171
172 172 spec, params = parseparams(spec)
173 173
174 174 if spec in util.compengines.supportedbundlenames:
175 175 compression = spec
176 176 version = 'v1'
177 177 # Generaldelta repos require v2.
178 178 if 'generaldelta' in repo.requirements:
179 179 version = 'v2'
180 180 # Modern compression engines require v2.
181 181 if compression not in _bundlespecv1compengines:
182 182 version = 'v2'
183 183 elif spec in _bundlespeccgversions:
184 184 if spec == 'packed1':
185 185 compression = 'none'
186 186 else:
187 187 compression = 'bzip2'
188 188 version = spec
189 189 else:
190 190 raise error.UnsupportedBundleSpecification(
191 191 _('%s is not a recognized bundle specification') % spec)
192 192
193 193 # Bundle version 1 only supports a known set of compression engines.
194 194 if version == 'v1' and compression not in _bundlespecv1compengines:
195 195 raise error.UnsupportedBundleSpecification(
196 196 _('compression engine %s is not supported on v1 bundles') %
197 197 compression)
198 198
199 199 # The specification for packed1 can optionally declare the data formats
200 200 # required to apply it. If we see this metadata, compare against what the
201 201 # repo supports and error if the bundle isn't compatible.
202 202 if version == 'packed1' and 'requirements' in params:
203 203 requirements = set(params['requirements'].split(','))
204 204 missingreqs = requirements - repo.supportedformats
205 205 if missingreqs:
206 206 raise error.UnsupportedBundleSpecification(
207 207 _('missing support for repository features: %s') %
208 208 ', '.join(sorted(missingreqs)))
209 209
210 210 # Compute contentopts based on the version
211 211 contentopts = _bundlespeccontentopts.get(version, {}).copy()
212 212
213 213 # Process the variants
214 214 if "stream" in params and params["stream"] == "v2":
215 215 variant = _bundlespecvariants["streamv2"]
216 216 contentopts.update(variant)
217 217
218 218 if not externalnames:
219 219 engine = util.compengines.forbundlename(compression)
220 220 compression = engine.bundletype()[1]
221 221 version = _bundlespeccgversions[version]
222 222
223 223 return bundlespec(compression, version, params, contentopts)
224 224
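# Illustrative sketch of specs accepted by parsebundlespec() (derived from the
# tables above; the parsed values shown are indicative, not verified output):
#
#   parsebundlespec(repo, 'gzip-v2')
#     -> compression 'GZ', version '02', no params, v2 content opts
#   parsebundlespec(repo, 'none-packed1;requirements=revlogv1')
#     -> compression 'UN', version 's1', params {'requirements': 'revlogv1'}
#   parsebundlespec(repo, 'none-v2;stream=v2')
#     -> v2 bundle whose content opts enable the 'streamv2' variant, i.e. the
#        stream clone bundle spec this changeset is about accepting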
225 225 def readbundle(ui, fh, fname, vfs=None):
226 226 header = changegroup.readexactly(fh, 4)
227 227
228 228 alg = None
229 229 if not fname:
230 230 fname = "stream"
231 231 if not header.startswith('HG') and header.startswith('\0'):
232 232 fh = changegroup.headerlessfixup(fh, header)
233 233 header = "HG10"
234 234 alg = 'UN'
235 235 elif vfs:
236 236 fname = vfs.join(fname)
237 237
238 238 magic, version = header[0:2], header[2:4]
239 239
240 240 if magic != 'HG':
241 241 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
242 242 if version == '10':
243 243 if alg is None:
244 244 alg = changegroup.readexactly(fh, 2)
245 245 return changegroup.cg1unpacker(fh, alg)
246 246 elif version.startswith('2'):
247 247 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
248 248 elif version == 'S1':
249 249 return streamclone.streamcloneapplier(fh)
250 250 else:
251 251 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
252 252
253 253 def getbundlespec(ui, fh):
254 254 """Infer the bundlespec from a bundle file handle.
255 255
256 256 The input file handle is seeked and the original seek position is not
257 257 restored.
258 258 """
259 259 def speccompression(alg):
260 260 try:
261 261 return util.compengines.forbundletype(alg).bundletype()[0]
262 262 except KeyError:
263 263 return None
264 264
265 265 b = readbundle(ui, fh, None)
266 266 if isinstance(b, changegroup.cg1unpacker):
267 267 alg = b._type
268 268 if alg == '_truncatedBZ':
269 269 alg = 'BZ'
270 270 comp = speccompression(alg)
271 271 if not comp:
272 272 raise error.Abort(_('unknown compression algorithm: %s') % alg)
273 273 return '%s-v1' % comp
274 274 elif isinstance(b, bundle2.unbundle20):
275 275 if 'Compression' in b.params:
276 276 comp = speccompression(b.params['Compression'])
277 277 if not comp:
278 278 raise error.Abort(_('unknown compression algorithm: %s') % comp)
279 279 else:
280 280 comp = 'none'
281 281
282 282 version = None
283 283 for part in b.iterparts():
284 284 if part.type == 'changegroup':
285 285 version = part.params['version']
286 286 if version in ('01', '02'):
287 287 version = 'v2'
288 288 else:
289 289 raise error.Abort(_('changegroup version %s does not have '
290 290 'a known bundlespec') % version,
291 291 hint=_('try upgrading your Mercurial '
292 292 'client'))
293 293 elif part.type == 'stream2' and version is None:
294 294 # A stream2 part must be part of a v2 bundle
295 295 version = "v2"
296 296 requirements = urlreq.unquote(part.params['requirements'])
297 297 splitted = requirements.split()
298 298 params = bundle2._formatrequirementsparams(splitted)
299 299 return 'none-v2;stream=v2;%s' % params
300 300
301 301 if not version:
302 302 raise error.Abort(_('could not identify changegroup version in '
303 303 'bundle'))
304 304
305 305 return '%s-%s' % (comp, version)
306 306 elif isinstance(b, streamclone.streamcloneapplier):
307 307 requirements = streamclone.readbundle1header(fh)[2]
308 308 formatted = bundle2._formatrequirementsparams(requirements)
309 309 return 'none-packed1;%s' % formatted
310 310 else:
311 311 raise error.Abort(_('unknown bundle type: %s') % b)
312 312
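# Illustrative sketch of inferring a spec back from a bundle file (the file
# name is hypothetical; note that getbundlespec() seeks the handle and does
# not restore the position):
#
#   with open('dump.hg', 'rb') as fh:
#       spec = getbundlespec(ui, fh)
#   # e.g. 'gzip-v1', 'none-packed1;requirements=...' or
#   # 'none-v2;stream=v2;requirements=...'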
313 313 def _computeoutgoing(repo, heads, common):
314 314 """Computes which revs are outgoing given a set of common
315 315 and a set of heads.
316 316
317 317 This is a separate function so extensions can have access to
318 318 the logic.
319 319
320 320 Returns a discovery.outgoing object.
321 321 """
322 322 cl = repo.changelog
323 323 if common:
324 324 hasnode = cl.hasnode
325 325 common = [n for n in common if hasnode(n)]
326 326 else:
327 327 common = [nullid]
328 328 if not heads:
329 329 heads = cl.heads()
330 330 return discovery.outgoing(repo, common, heads)
331 331
332 332 def _forcebundle1(op):
333 333 """return true if a pull/push must use bundle1
334 334
335 335 This function is used to allow testing of the older bundle version"""
336 336 ui = op.repo.ui
337 337 # The goal of this config is to allow developers to choose the bundle
338 338 # version used during exchange. This is especially handy during tests.
339 339 # Value is a list of bundle versions to pick from; the highest version
340 340 # should be used.
341 341 #
342 342 # developer config: devel.legacy.exchange
343 343 exchange = ui.configlist('devel', 'legacy.exchange')
344 344 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
345 345 return forcebundle1 or not op.remote.capable('bundle2')
346 346
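# Illustrative sketch of the developer config consulted above (values are
# examples; listing 'bundle1' without 'bundle2' forces the legacy exchange
# format for testing):
#
#   [devel]
#   legacy.exchange = bundle1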
347 347 class pushoperation(object):
348 348 """A object that represent a single push operation
349 349
350 350 Its purpose is to carry push related state and very common operations.
351 351
352 352 A new pushoperation should be created at the beginning of each push and
353 353 discarded afterward.
354 354 """
355 355
356 356 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
357 357 bookmarks=(), pushvars=None):
358 358 # repo we push from
359 359 self.repo = repo
360 360 self.ui = repo.ui
361 361 # repo we push to
362 362 self.remote = remote
363 363 # force option provided
364 364 self.force = force
365 365 # revs to be pushed (None is "all")
366 366 self.revs = revs
367 367 # bookmark explicitly pushed
368 368 self.bookmarks = bookmarks
369 369 # allow push of new branch
370 370 self.newbranch = newbranch
371 371 # steps already performed
372 372 # (used to check what steps have already been performed through bundle2)
373 373 self.stepsdone = set()
374 374 # Integer version of the changegroup push result
375 375 # - None means nothing to push
376 376 # - 0 means HTTP error
377 377 # - 1 means we pushed and remote head count is unchanged *or*
378 378 # we have outgoing changesets but refused to push
379 379 # - other values as described by addchangegroup()
380 380 self.cgresult = None
381 381 # Boolean value for the bookmark push
382 382 self.bkresult = None
383 383 # discovery.outgoing object (contains common and outgoing data)
384 384 self.outgoing = None
385 385 # all remote topological heads before the push
386 386 self.remoteheads = None
387 387 # Details of the remote branch pre and post push
388 388 #
389 389 # mapping: {'branch': ([remoteheads],
390 390 # [newheads],
391 391 # [unsyncedheads],
392 392 # [discardedheads])}
393 393 # - branch: the branch name
394 394 # - remoteheads: the list of remote heads known locally
395 395 # None if the branch is new
396 396 # - newheads: the new remote heads (known locally) with outgoing pushed
397 397 # - unsyncedheads: the list of remote heads unknown locally.
398 398 # - discardedheads: the list of remote heads made obsolete by the push
399 399 self.pushbranchmap = None
400 400 # testable as a boolean indicating if any nodes are missing locally.
401 401 self.incoming = None
402 402 # summary of the remote phase situation
403 403 self.remotephases = None
404 404 # phase changes that must be pushed alongside the changesets
405 405 self.outdatedphases = None
406 406 # phase changes that must be pushed if the changeset push fails
407 407 self.fallbackoutdatedphases = None
408 408 # outgoing obsmarkers
409 409 self.outobsmarkers = set()
410 410 # outgoing bookmarks
411 411 self.outbookmarks = []
412 412 # transaction manager
413 413 self.trmanager = None
414 414 # map { pushkey partid -> callback handling failure}
415 415 # used to handle exception from mandatory pushkey part failure
416 416 self.pkfailcb = {}
417 417 # an iterable of pushvars or None
418 418 self.pushvars = pushvars
419 419
420 420 @util.propertycache
421 421 def futureheads(self):
422 422 """future remote heads if the changeset push succeeds"""
423 423 return self.outgoing.missingheads
424 424
425 425 @util.propertycache
426 426 def fallbackheads(self):
427 427 """future remote heads if the changeset push fails"""
428 428 if self.revs is None:
429 429 # no targets to push, all common heads are relevant
430 430 return self.outgoing.commonheads
431 431 unfi = self.repo.unfiltered()
432 432 # I want cheads = heads(::missingheads and ::commonheads)
433 433 # (missingheads is revs with secret changeset filtered out)
434 434 #
435 435 # This can be expressed as:
436 436 # cheads = ( (missingheads and ::commonheads)
437 437 # + (commonheads and ::missingheads))
438 438 # )
439 439 #
440 440 # while trying to push we already computed the following:
441 441 # common = (::commonheads)
442 442 # missing = ((commonheads::missingheads) - commonheads)
443 443 #
444 444 # We can pick:
445 445 # * missingheads part of common (::commonheads)
446 446 common = self.outgoing.common
447 447 nm = self.repo.changelog.nodemap
448 448 cheads = [node for node in self.revs if nm[node] in common]
449 449 # and
450 450 # * commonheads parents on missing
451 451 revset = unfi.set('%ln and parents(roots(%ln))',
452 452 self.outgoing.commonheads,
453 453 self.outgoing.missing)
454 454 cheads.extend(c.node() for c in revset)
455 455 return cheads
456 456
457 457 @property
458 458 def commonheads(self):
459 459 """set of all common heads after changeset bundle push"""
460 460 if self.cgresult:
461 461 return self.futureheads
462 462 else:
463 463 return self.fallbackheads
464 464
465 465 # mapping of messages used when pushing bookmarks
466 466 bookmsgmap = {'update': (_("updating bookmark %s\n"),
467 467 _('updating bookmark %s failed!\n')),
468 468 'export': (_("exporting bookmark %s\n"),
469 469 _('exporting bookmark %s failed!\n')),
470 470 'delete': (_("deleting remote bookmark %s\n"),
471 471 _('deleting remote bookmark %s failed!\n')),
472 472 }
473 473
474 474
475 475 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
476 476 opargs=None):
477 477 '''Push outgoing changesets (limited by revs) from a local
478 478 repository to remote. Return an integer:
479 479 - None means nothing to push
480 480 - 0 means HTTP error
481 481 - 1 means we pushed and remote head count is unchanged *or*
482 482 we have outgoing changesets but refused to push
483 483 - other values as described by addchangegroup()
484 484 '''
485 485 if opargs is None:
486 486 opargs = {}
487 487 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
488 488 **pycompat.strkwargs(opargs))
489 489 if pushop.remote.local():
490 490 missing = (set(pushop.repo.requirements)
491 491 - pushop.remote.local().supported)
492 492 if missing:
493 493 msg = _("required features are not"
494 494 " supported in the destination:"
495 495 " %s") % (', '.join(sorted(missing)))
496 496 raise error.Abort(msg)
497 497
498 498 if not pushop.remote.canpush():
499 499 raise error.Abort(_("destination does not support push"))
500 500
501 501 if not pushop.remote.capable('unbundle'):
502 502 raise error.Abort(_('cannot push: destination does not support the '
503 503 'unbundle wire protocol command'))
504 504
505 505 # get lock as we might write phase data
506 506 wlock = lock = None
507 507 try:
508 508 # bundle2 push may receive a reply bundle touching bookmarks or other
509 509 # things requiring the wlock. Take it now to ensure proper ordering.
510 510 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
511 511 if (not _forcebundle1(pushop)) and maypushback:
512 512 wlock = pushop.repo.wlock()
513 513 lock = pushop.repo.lock()
514 514 pushop.trmanager = transactionmanager(pushop.repo,
515 515 'push-response',
516 516 pushop.remote.url())
517 517 except IOError as err:
518 518 if err.errno != errno.EACCES:
519 519 raise
520 520 # source repo cannot be locked.
521 521 # We do not abort the push, but just disable the local phase
522 522 # synchronisation.
523 523 msg = 'cannot lock source repository: %s\n' % err
524 524 pushop.ui.debug(msg)
525 525
526 526 with wlock or util.nullcontextmanager(), \
527 527 lock or util.nullcontextmanager(), \
528 528 pushop.trmanager or util.nullcontextmanager():
529 529 pushop.repo.checkpush(pushop)
530 530 _pushdiscovery(pushop)
531 531 if not _forcebundle1(pushop):
532 532 _pushbundle2(pushop)
533 533 _pushchangeset(pushop)
534 534 _pushsyncphase(pushop)
535 535 _pushobsolete(pushop)
536 536 _pushbookmark(pushop)
537 537
538 538 return pushop
539 539
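# Illustrative sketch of a caller-side use of push() (the peer URL is
# hypothetical; 'hg push' reaches this function through a peer object):
#
#   other = hg.peer(repo, {}, 'https://example.com/repo')
#   pushop = exchange.push(repo, other, force=False, revs=None)
#   # pushop.cgresult follows the return codes listed in the docstring above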
540 540 # list of steps to perform discovery before push
541 541 pushdiscoveryorder = []
542 542
543 543 # Mapping between step name and function
544 544 #
545 545 # This exists to help extensions wrap steps if necessary
546 546 pushdiscoverymapping = {}
547 547
548 548 def pushdiscovery(stepname):
549 549 """decorator for function performing discovery before push
550 550
551 551 The function is added to the step -> function mapping and appended to the
552 552 list of steps. Beware that decorated function will be added in order (this
553 553 may matter).
554 554
555 555 You can only use this decorator for a new step, if you want to wrap a step
556 556 from an extension, change the pushdiscovery dictionary directly."""
557 557 def dec(func):
558 558 assert stepname not in pushdiscoverymapping
559 559 pushdiscoverymapping[stepname] = func
560 560 pushdiscoveryorder.append(stepname)
561 561 return func
562 562 return dec
563 563
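# Illustrative sketch of registering an extra discovery step with the
# decorator above (hypothetical step name; the real steps follow below):
#
#   @pushdiscovery('my-step')
#   def _pushdiscoverymystep(pushop):
#       pushop.ui.debug('running my discovery step\n')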
564 564 def _pushdiscovery(pushop):
565 565 """Run all discovery steps"""
566 566 for stepname in pushdiscoveryorder:
567 567 step = pushdiscoverymapping[stepname]
568 568 step(pushop)
569 569
570 570 @pushdiscovery('changeset')
571 571 def _pushdiscoverychangeset(pushop):
572 572 """discover the changeset that need to be pushed"""
573 573 fci = discovery.findcommonincoming
574 574 if pushop.revs:
575 575 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
576 576 ancestorsof=pushop.revs)
577 577 else:
578 578 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
579 579 common, inc, remoteheads = commoninc
580 580 fco = discovery.findcommonoutgoing
581 581 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
582 582 commoninc=commoninc, force=pushop.force)
583 583 pushop.outgoing = outgoing
584 584 pushop.remoteheads = remoteheads
585 585 pushop.incoming = inc
586 586
587 587 @pushdiscovery('phase')
588 588 def _pushdiscoveryphase(pushop):
589 589 """discover the phase that needs to be pushed
590 590
591 591 (computed for both success and failure case for changesets push)"""
592 592 outgoing = pushop.outgoing
593 593 unfi = pushop.repo.unfiltered()
594 594 remotephases = pushop.remote.listkeys('phases')
595 595 if (pushop.ui.configbool('ui', '_usedassubrepo')
596 596 and remotephases # server supports phases
597 597 and not pushop.outgoing.missing # no changesets to be pushed
598 598 and remotephases.get('publishing', False)):
599 599 # When:
600 600 # - this is a subrepo push
601 601 # - and the remote supports phases
602 602 # - and no changesets are to be pushed
603 603 # - and the remote is publishing
604 604 # We may be in the issue 3871 case!
605 605 # We drop the phase synchronisation that would be done as a
606 606 # courtesy to publish changesets possibly locally draft
607 607 # on the remote.
608 608 pushop.outdatedphases = []
609 609 pushop.fallbackoutdatedphases = []
610 610 return
611 611
612 612 pushop.remotephases = phases.remotephasessummary(pushop.repo,
613 613 pushop.fallbackheads,
614 614 remotephases)
615 615 droots = pushop.remotephases.draftroots
616 616
617 617 extracond = ''
618 618 if not pushop.remotephases.publishing:
619 619 extracond = ' and public()'
620 620 revset = 'heads((%%ln::%%ln) %s)' % extracond
621 621 # Get the list of all revs that are draft on the remote but public here.
622 622 # XXX Beware that the revset breaks if droots are not strictly
623 623 # XXX roots; we may want to ensure they are, but it is costly
624 624 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
625 625 if not outgoing.missing:
626 626 future = fallback
627 627 else:
628 628 # add changesets we are going to push as draft
629 629 #
630 630 # should not be necessary for a publishing server, but because of an
631 631 # issue fixed in xxxxx we have to do it anyway.
632 632 fdroots = list(unfi.set('roots(%ln + %ln::)',
633 633 outgoing.missing, droots))
634 634 fdroots = [f.node() for f in fdroots]
635 635 future = list(unfi.set(revset, fdroots, pushop.futureheads))
636 636 pushop.outdatedphases = future
637 637 pushop.fallbackoutdatedphases = fallback
638 638
639 639 @pushdiscovery('obsmarker')
640 640 def _pushdiscoveryobsmarkers(pushop):
641 641 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
642 642 and pushop.repo.obsstore
643 643 and 'obsolete' in pushop.remote.listkeys('namespaces')):
644 644 repo = pushop.repo
645 645 # very naive computation that can be quite expensive on big repos.
646 646 # However: evolution is currently slow on them anyway.
647 647 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
648 648 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
649 649
650 650 @pushdiscovery('bookmarks')
651 651 def _pushdiscoverybookmarks(pushop):
652 652 ui = pushop.ui
653 653 repo = pushop.repo.unfiltered()
654 654 remote = pushop.remote
655 655 ui.debug("checking for updated bookmarks\n")
656 656 ancestors = ()
657 657 if pushop.revs:
658 658 revnums = map(repo.changelog.rev, pushop.revs)
659 659 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
660 660 remotebookmark = remote.listkeys('bookmarks')
661 661
662 662 explicit = set([repo._bookmarks.expandname(bookmark)
663 663 for bookmark in pushop.bookmarks])
664 664
665 665 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
666 666 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
667 667
668 668 def safehex(x):
669 669 if x is None:
670 670 return x
671 671 return hex(x)
672 672
673 673 def hexifycompbookmarks(bookmarks):
674 674 return [(b, safehex(scid), safehex(dcid))
675 675 for (b, scid, dcid) in bookmarks]
676 676
677 677 comp = [hexifycompbookmarks(marks) for marks in comp]
678 678 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
679 679
680 680 def _processcompared(pushop, pushed, explicit, remotebms, comp):
681 681 """take decision on bookmark to pull from the remote bookmark
682 682
683 683 Exist to help extensions who want to alter this behavior.
684 684 """
685 685 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
686 686
687 687 repo = pushop.repo
688 688
689 689 for b, scid, dcid in advsrc:
690 690 if b in explicit:
691 691 explicit.remove(b)
692 692 if not pushed or repo[scid].rev() in pushed:
693 693 pushop.outbookmarks.append((b, dcid, scid))
694 694 # search added bookmark
695 695 for b, scid, dcid in addsrc:
696 696 if b in explicit:
697 697 explicit.remove(b)
698 698 pushop.outbookmarks.append((b, '', scid))
699 699 # search for overwritten bookmark
700 700 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
701 701 if b in explicit:
702 702 explicit.remove(b)
703 703 pushop.outbookmarks.append((b, dcid, scid))
704 704 # search for bookmark to delete
705 705 for b, scid, dcid in adddst:
706 706 if b in explicit:
707 707 explicit.remove(b)
708 708 # treat as "deleted locally"
709 709 pushop.outbookmarks.append((b, dcid, ''))
710 710 # identical bookmarks shouldn't get reported
711 711 for b, scid, dcid in same:
712 712 if b in explicit:
713 713 explicit.remove(b)
714 714
715 715 if explicit:
716 716 explicit = sorted(explicit)
717 717 # we should probably list all of them
718 718 pushop.ui.warn(_('bookmark %s does not exist on the local '
719 719 'or remote repository!\n') % explicit[0])
720 720 pushop.bkresult = 2
721 721
722 722 pushop.outbookmarks.sort()
723 723
724 724 def _pushcheckoutgoing(pushop):
725 725 outgoing = pushop.outgoing
726 726 unfi = pushop.repo.unfiltered()
727 727 if not outgoing.missing:
728 728 # nothing to push
729 729 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
730 730 return False
731 731 # something to push
732 732 if not pushop.force:
733 733 # if repo.obsstore == False --> no obsolete
734 734 # then, skip the iteration
735 735 if unfi.obsstore:
736 736 # these messages are defined here to stay within the 80-char limit
737 737 mso = _("push includes obsolete changeset: %s!")
738 738 mspd = _("push includes phase-divergent changeset: %s!")
739 739 mscd = _("push includes content-divergent changeset: %s!")
740 740 mst = {"orphan": _("push includes orphan changeset: %s!"),
741 741 "phase-divergent": mspd,
742 742 "content-divergent": mscd}
743 743 # If we are to push and there is at least one
744 744 # obsolete or unstable changeset in missing, at
745 745 # least one of the missing heads will be obsolete or
746 746 # unstable. So checking heads only is ok
747 747 for node in outgoing.missingheads:
748 748 ctx = unfi[node]
749 749 if ctx.obsolete():
750 750 raise error.Abort(mso % ctx)
751 751 elif ctx.isunstable():
752 752 # TODO print more than one instability in the abort
753 753 # message
754 754 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
755 755
756 756 discovery.checkheads(pushop)
757 757 return True
758 758
759 759 # List of names of steps to perform for an outgoing bundle2, order matters.
760 760 b2partsgenorder = []
761 761
762 762 # Mapping between step name and function
763 763 #
764 764 # This exists to help extensions wrap steps if necessary
765 765 b2partsgenmapping = {}
766 766
767 767 def b2partsgenerator(stepname, idx=None):
768 768 """decorator for function generating bundle2 part
769 769
770 770 The function is added to the step -> function mapping and appended to the
771 771 list of steps. Beware that decorated functions will be added in order
772 772 (this may matter).
773 773
774 774 You can only use this decorator for new steps, if you want to wrap a step
775 775 from an extension, attack the b2partsgenmapping dictionary directly."""
776 776 def dec(func):
777 777 assert stepname not in b2partsgenmapping
778 778 b2partsgenmapping[stepname] = func
779 779 if idx is None:
780 780 b2partsgenorder.append(stepname)
781 781 else:
782 782 b2partsgenorder.insert(idx, stepname)
783 783 return func
784 784 return dec
785 785
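# Illustrative sketch of registering an extra bundle2 part generator with the
# decorator above (hypothetical part name; returning a callable registers it
# as a reply handler, as the 'changeset' step below does):
#
#   @b2partsgenerator('my-part')
#   def _pushb2mypart(pushop, bundler):
#       if 'my-part' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('my-part')
#       bundler.newpart('output', data=b'hello from my extension')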
786 786 def _pushb2ctxcheckheads(pushop, bundler):
787 787 """Generate race condition checking parts
788 788
789 789 Exists as an independent function to aid extensions
790 790 """
791 791 # * 'force' does not check for push races,
792 792 # * if we don't push anything, there is nothing to check.
793 793 if not pushop.force and pushop.outgoing.missingheads:
794 794 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
795 795 emptyremote = pushop.pushbranchmap is None
796 796 if not allowunrelated or emptyremote:
797 797 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
798 798 else:
799 799 affected = set()
800 800 for branch, heads in pushop.pushbranchmap.iteritems():
801 801 remoteheads, newheads, unsyncedheads, discardedheads = heads
802 802 if remoteheads is not None:
803 803 remote = set(remoteheads)
804 804 affected |= set(discardedheads) & remote
805 805 affected |= remote - set(newheads)
806 806 if affected:
807 807 data = iter(sorted(affected))
808 808 bundler.newpart('check:updated-heads', data=data)
809 809
810 810 def _pushing(pushop):
811 811 """return True if we are pushing anything"""
812 812 return bool(pushop.outgoing.missing
813 813 or pushop.outdatedphases
814 814 or pushop.outobsmarkers
815 815 or pushop.outbookmarks)
816 816
817 817 @b2partsgenerator('check-bookmarks')
818 818 def _pushb2checkbookmarks(pushop, bundler):
819 819 """insert bookmark move checking"""
820 820 if not _pushing(pushop) or pushop.force:
821 821 return
822 822 b2caps = bundle2.bundle2caps(pushop.remote)
823 823 hasbookmarkcheck = 'bookmarks' in b2caps
824 824 if not (pushop.outbookmarks and hasbookmarkcheck):
825 825 return
826 826 data = []
827 827 for book, old, new in pushop.outbookmarks:
828 828 old = bin(old)
829 829 data.append((book, old))
830 830 checkdata = bookmod.binaryencode(data)
831 831 bundler.newpart('check:bookmarks', data=checkdata)
832 832
833 833 @b2partsgenerator('check-phases')
834 834 def _pushb2checkphases(pushop, bundler):
835 835 """insert phase move checking"""
836 836 if not _pushing(pushop) or pushop.force:
837 837 return
838 838 b2caps = bundle2.bundle2caps(pushop.remote)
839 839 hasphaseheads = 'heads' in b2caps.get('phases', ())
840 840 if pushop.remotephases is not None and hasphaseheads:
841 841 # check that the remote phase has not changed
842 842 checks = [[] for p in phases.allphases]
843 843 checks[phases.public].extend(pushop.remotephases.publicheads)
844 844 checks[phases.draft].extend(pushop.remotephases.draftroots)
845 845 if any(checks):
846 846 for nodes in checks:
847 847 nodes.sort()
848 848 checkdata = phases.binaryencode(checks)
849 849 bundler.newpart('check:phases', data=checkdata)
850 850
851 851 @b2partsgenerator('changeset')
852 852 def _pushb2ctx(pushop, bundler):
853 853 """handle changegroup push through bundle2
854 854
855 855 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
856 856 """
857 857 if 'changesets' in pushop.stepsdone:
858 858 return
859 859 pushop.stepsdone.add('changesets')
860 860 # Send known heads to the server for race detection.
861 861 if not _pushcheckoutgoing(pushop):
862 862 return
863 863 pushop.repo.prepushoutgoinghooks(pushop)
864 864
865 865 _pushb2ctxcheckheads(pushop, bundler)
866 866
867 867 b2caps = bundle2.bundle2caps(pushop.remote)
868 868 version = '01'
869 869 cgversions = b2caps.get('changegroup')
870 870 if cgversions: # 3.1 and 3.2 ship with an empty value
871 871 cgversions = [v for v in cgversions
872 872 if v in changegroup.supportedoutgoingversions(
873 873 pushop.repo)]
874 874 if not cgversions:
875 875 raise ValueError(_('no common changegroup version'))
876 876 version = max(cgversions)
877 877 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
878 878 'push')
879 879 cgpart = bundler.newpart('changegroup', data=cgstream)
880 880 if cgversions:
881 881 cgpart.addparam('version', version)
882 882 if 'treemanifest' in pushop.repo.requirements:
883 883 cgpart.addparam('treemanifest', '1')
884 884 def handlereply(op):
885 885 """extract addchangegroup returns from server reply"""
886 886 cgreplies = op.records.getreplies(cgpart.id)
887 887 assert len(cgreplies['changegroup']) == 1
888 888 pushop.cgresult = cgreplies['changegroup'][0]['return']
889 889 return handlereply
890 890
891 891 @b2partsgenerator('phase')
892 892 def _pushb2phases(pushop, bundler):
893 893 """handle phase push through bundle2"""
894 894 if 'phases' in pushop.stepsdone:
895 895 return
896 896 b2caps = bundle2.bundle2caps(pushop.remote)
897 897 ui = pushop.repo.ui
898 898
899 899 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
900 900 haspushkey = 'pushkey' in b2caps
901 901 hasphaseheads = 'heads' in b2caps.get('phases', ())
902 902
903 903 if hasphaseheads and not legacyphase:
904 904 return _pushb2phaseheads(pushop, bundler)
905 905 elif haspushkey:
906 906 return _pushb2phasespushkey(pushop, bundler)
907 907
908 908 def _pushb2phaseheads(pushop, bundler):
909 909 """push phase information through a bundle2 - binary part"""
910 910 pushop.stepsdone.add('phases')
911 911 if pushop.outdatedphases:
912 912 updates = [[] for p in phases.allphases]
913 913 updates[0].extend(h.node() for h in pushop.outdatedphases)
914 914 phasedata = phases.binaryencode(updates)
915 915 bundler.newpart('phase-heads', data=phasedata)
916 916
917 917 def _pushb2phasespushkey(pushop, bundler):
918 918 """push phase information through a bundle2 - pushkey part"""
919 919 pushop.stepsdone.add('phases')
920 920 part2node = []
921 921
922 922 def handlefailure(pushop, exc):
923 923 targetid = int(exc.partid)
924 924 for partid, node in part2node:
925 925 if partid == targetid:
926 926 raise error.Abort(_('updating %s to public failed') % node)
927 927
928 928 enc = pushkey.encode
929 929 for newremotehead in pushop.outdatedphases:
930 930 part = bundler.newpart('pushkey')
931 931 part.addparam('namespace', enc('phases'))
932 932 part.addparam('key', enc(newremotehead.hex()))
933 933 part.addparam('old', enc('%d' % phases.draft))
934 934 part.addparam('new', enc('%d' % phases.public))
935 935 part2node.append((part.id, newremotehead))
936 936 pushop.pkfailcb[part.id] = handlefailure
937 937
938 938 def handlereply(op):
939 939 for partid, node in part2node:
940 940 partrep = op.records.getreplies(partid)
941 941 results = partrep['pushkey']
942 942 assert len(results) <= 1
943 943 msg = None
944 944 if not results:
945 945 msg = _('server ignored update of %s to public!\n') % node
946 946 elif not int(results[0]['return']):
947 947 msg = _('updating %s to public failed!\n') % node
948 948 if msg is not None:
949 949 pushop.ui.warn(msg)
950 950 return handlereply
951 951
952 952 @b2partsgenerator('obsmarkers')
953 953 def _pushb2obsmarkers(pushop, bundler):
954 954 if 'obsmarkers' in pushop.stepsdone:
955 955 return
956 956 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
957 957 if obsolete.commonversion(remoteversions) is None:
958 958 return
959 959 pushop.stepsdone.add('obsmarkers')
960 960 if pushop.outobsmarkers:
961 961 markers = sorted(pushop.outobsmarkers)
962 962 bundle2.buildobsmarkerspart(bundler, markers)
963 963
964 964 @b2partsgenerator('bookmarks')
965 965 def _pushb2bookmarks(pushop, bundler):
966 966 """handle bookmark push through bundle2"""
967 967 if 'bookmarks' in pushop.stepsdone:
968 968 return
969 969 b2caps = bundle2.bundle2caps(pushop.remote)
970 970
971 971 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
972 972 legacybooks = 'bookmarks' in legacy
973 973
974 974 if not legacybooks and 'bookmarks' in b2caps:
975 975 return _pushb2bookmarkspart(pushop, bundler)
976 976 elif 'pushkey' in b2caps:
977 977 return _pushb2bookmarkspushkey(pushop, bundler)
978 978
979 979 def _bmaction(old, new):
980 980 """small utility for bookmark pushing"""
981 981 if not old:
982 982 return 'export'
983 983 elif not new:
984 984 return 'delete'
985 985 return 'update'
986 986
987 987 def _pushb2bookmarkspart(pushop, bundler):
988 988 pushop.stepsdone.add('bookmarks')
989 989 if not pushop.outbookmarks:
990 990 return
991 991
992 992 allactions = []
993 993 data = []
994 994 for book, old, new in pushop.outbookmarks:
995 995 new = bin(new)
996 996 data.append((book, new))
997 997 allactions.append((book, _bmaction(old, new)))
998 998 checkdata = bookmod.binaryencode(data)
999 999 bundler.newpart('bookmarks', data=checkdata)
1000 1000
1001 1001 def handlereply(op):
1002 1002 ui = pushop.ui
1003 1003 # if success
1004 1004 for book, action in allactions:
1005 1005 ui.status(bookmsgmap[action][0] % book)
1006 1006
1007 1007 return handlereply
1008 1008
1009 1009 def _pushb2bookmarkspushkey(pushop, bundler):
1010 1010 pushop.stepsdone.add('bookmarks')
1011 1011 part2book = []
1012 1012 enc = pushkey.encode
1013 1013
1014 1014 def handlefailure(pushop, exc):
1015 1015 targetid = int(exc.partid)
1016 1016 for partid, book, action in part2book:
1017 1017 if partid == targetid:
1018 1018 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1019 1019 # we should not be called for parts we did not generate
1020 1020 assert False
1021 1021
1022 1022 for book, old, new in pushop.outbookmarks:
1023 1023 part = bundler.newpart('pushkey')
1024 1024 part.addparam('namespace', enc('bookmarks'))
1025 1025 part.addparam('key', enc(book))
1026 1026 part.addparam('old', enc(old))
1027 1027 part.addparam('new', enc(new))
1028 1028 action = 'update'
1029 1029 if not old:
1030 1030 action = 'export'
1031 1031 elif not new:
1032 1032 action = 'delete'
1033 1033 part2book.append((part.id, book, action))
1034 1034 pushop.pkfailcb[part.id] = handlefailure
1035 1035
1036 1036 def handlereply(op):
1037 1037 ui = pushop.ui
1038 1038 for partid, book, action in part2book:
1039 1039 partrep = op.records.getreplies(partid)
1040 1040 results = partrep['pushkey']
1041 1041 assert len(results) <= 1
1042 1042 if not results:
1043 1043 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
1044 1044 else:
1045 1045 ret = int(results[0]['return'])
1046 1046 if ret:
1047 1047 ui.status(bookmsgmap[action][0] % book)
1048 1048 else:
1049 1049 ui.warn(bookmsgmap[action][1] % book)
1050 1050 if pushop.bkresult is not None:
1051 1051 pushop.bkresult = 1
1052 1052 return handlereply
1053 1053
1054 1054 @b2partsgenerator('pushvars', idx=0)
1055 1055 def _getbundlesendvars(pushop, bundler):
1056 1056 '''send shellvars via bundle2'''
1057 1057 pushvars = pushop.pushvars
1058 1058 if pushvars:
1059 1059 shellvars = {}
1060 1060 for raw in pushvars:
1061 1061 if '=' not in raw:
1062 1062 msg = ("unable to parse variable '%s', should follow "
1063 1063 "'KEY=VALUE' or 'KEY=' format")
1064 1064 raise error.Abort(msg % raw)
1065 1065 k, v = raw.split('=', 1)
1066 1066 shellvars[k] = v
1067 1067
1068 1068 part = bundler.newpart('pushvars')
1069 1069
1070 1070 for key, value in shellvars.iteritems():
1071 1071 part.addparam(key, value, mandatory=False)
1072 1072
1073 1073 def _pushbundle2(pushop):
1074 1074 """push data to the remote using bundle2
1075 1075
1076 1076 The only currently supported type of data is changegroup but this will
1077 1077 evolve in the future."""
1078 1078 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1079 1079 pushback = (pushop.trmanager
1080 1080 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1081 1081
1082 1082 # create reply capability
1083 1083 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1084 1084 allowpushback=pushback,
1085 1085 role='client'))
1086 1086 bundler.newpart('replycaps', data=capsblob)
1087 1087 replyhandlers = []
1088 1088 for partgenname in b2partsgenorder:
1089 1089 partgen = b2partsgenmapping[partgenname]
1090 1090 ret = partgen(pushop, bundler)
1091 1091 if callable(ret):
1092 1092 replyhandlers.append(ret)
1093 1093 # do not push if nothing to push
1094 1094 if bundler.nbparts <= 1:
1095 1095 return
1096 1096 stream = util.chunkbuffer(bundler.getchunks())
1097 1097 try:
1098 1098 try:
1099 1099 reply = pushop.remote.unbundle(
1100 1100 stream, ['force'], pushop.remote.url())
1101 1101 except error.BundleValueError as exc:
1102 1102 raise error.Abort(_('missing support for %s') % exc)
1103 1103 try:
1104 1104 trgetter = None
1105 1105 if pushback:
1106 1106 trgetter = pushop.trmanager.transaction
1107 1107 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1108 1108 except error.BundleValueError as exc:
1109 1109 raise error.Abort(_('missing support for %s') % exc)
1110 1110 except bundle2.AbortFromPart as exc:
1111 1111 pushop.ui.status(_('remote: %s\n') % exc)
1112 1112 if exc.hint is not None:
1113 1113 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1114 1114 raise error.Abort(_('push failed on remote'))
1115 1115 except error.PushkeyFailed as exc:
1116 1116 partid = int(exc.partid)
1117 1117 if partid not in pushop.pkfailcb:
1118 1118 raise
1119 1119 pushop.pkfailcb[partid](pushop, exc)
1120 1120 for rephand in replyhandlers:
1121 1121 rephand(op)
1122 1122
1123 1123 def _pushchangeset(pushop):
1124 1124 """Make the actual push of changeset bundle to remote repo"""
1125 1125 if 'changesets' in pushop.stepsdone:
1126 1126 return
1127 1127 pushop.stepsdone.add('changesets')
1128 1128 if not _pushcheckoutgoing(pushop):
1129 1129 return
1130 1130
1131 1131 # Should have verified this in push().
1132 1132 assert pushop.remote.capable('unbundle')
1133 1133
1134 1134 pushop.repo.prepushoutgoinghooks(pushop)
1135 1135 outgoing = pushop.outgoing
1136 1136 # TODO: get bundlecaps from remote
1137 1137 bundlecaps = None
1138 1138 # create a changegroup from local
1139 1139 if pushop.revs is None and not (outgoing.excluded
1140 1140 or pushop.repo.changelog.filteredrevs):
1141 1141 # push everything,
1142 1142 # use the fast path, no race possible on push
1143 1143 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1144 1144 fastpath=True, bundlecaps=bundlecaps)
1145 1145 else:
1146 1146 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1147 1147 'push', bundlecaps=bundlecaps)
1148 1148
1149 1149 # apply changegroup to remote
1150 1150 # local repo finds heads on server, finds out what
1151 1151 # revs it must push. once revs transferred, if server
1152 1152 # finds it has different heads (someone else won
1153 1153 # commit/push race), server aborts.
1154 1154 if pushop.force:
1155 1155 remoteheads = ['force']
1156 1156 else:
1157 1157 remoteheads = pushop.remoteheads
1158 1158 # ssh: return remote's addchangegroup()
1159 1159 # http: return remote's addchangegroup() or 0 for error
1160 1160 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1161 1161 pushop.repo.url())
1162 1162
1163 1163 def _pushsyncphase(pushop):
1164 1164 """synchronise phase information locally and remotely"""
1165 1165 cheads = pushop.commonheads
1166 1166 # even when we don't push, exchanging phase data is useful
1167 1167 remotephases = pushop.remote.listkeys('phases')
1168 1168 if (pushop.ui.configbool('ui', '_usedassubrepo')
1169 1169 and remotephases # server supports phases
1170 1170 and pushop.cgresult is None # nothing was pushed
1171 1171 and remotephases.get('publishing', False)):
1172 1172 # When:
1173 1173 # - this is a subrepo push
1174 1174 # - and the remote supports phases
1175 1175 # - and no changeset was pushed
1176 1176 # - and the remote is publishing
1177 1177 # We may be in the issue 3871 case!
1178 1178 # We drop the phase synchronisation that would be done as a
1179 1179 # courtesy to publish changesets possibly locally draft
1180 1180 # on the remote.
1181 1181 remotephases = {'publishing': 'True'}
1182 1182 if not remotephases: # old server or public-only reply from non-publishing
1183 1183 _localphasemove(pushop, cheads)
1184 1184 # don't push any phase data as there is nothing to push
1185 1185 else:
1186 1186 ana = phases.analyzeremotephases(pushop.repo, cheads,
1187 1187 remotephases)
1188 1188 pheads, droots = ana
1189 1189 ### Apply remote phase on local
1190 1190 if remotephases.get('publishing', False):
1191 1191 _localphasemove(pushop, cheads)
1192 1192 else: # publish = False
1193 1193 _localphasemove(pushop, pheads)
1194 1194 _localphasemove(pushop, cheads, phases.draft)
1195 1195 ### Apply local phase on remote
1196 1196
1197 1197 if pushop.cgresult:
1198 1198 if 'phases' in pushop.stepsdone:
1199 1199 # phases already pushed through bundle2
1200 1200 return
1201 1201 outdated = pushop.outdatedphases
1202 1202 else:
1203 1203 outdated = pushop.fallbackoutdatedphases
1204 1204
1205 1205 pushop.stepsdone.add('phases')
1206 1206
1207 1207 # filter heads already turned public by the push
1208 1208 outdated = [c for c in outdated if c.node() not in pheads]
1209 1209 # fallback to independent pushkey command
1210 1210 for newremotehead in outdated:
1211 1211 r = pushop.remote.pushkey('phases',
1212 1212 newremotehead.hex(),
1213 1213 ('%d' % phases.draft),
1214 1214 ('%d' % phases.public))
1215 1215 if not r:
1216 1216 pushop.ui.warn(_('updating %s to public failed!\n')
1217 1217 % newremotehead)
1218 1218
1219 1219 def _localphasemove(pushop, nodes, phase=phases.public):
1220 1220 """move <nodes> to <phase> in the local source repo"""
1221 1221 if pushop.trmanager:
1222 1222 phases.advanceboundary(pushop.repo,
1223 1223 pushop.trmanager.transaction(),
1224 1224 phase,
1225 1225 nodes)
1226 1226 else:
1227 1227 # repo is not locked, do not change any phases!
1228 1228 # Informs the user that phases should have been moved when
1229 1229 # applicable.
1230 1230 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1231 1231 phasestr = phases.phasenames[phase]
1232 1232 if actualmoves:
1233 1233 pushop.ui.status(_('cannot lock source repo, skipping '
1234 1234 'local %s phase update\n') % phasestr)
1235 1235
1236 1236 def _pushobsolete(pushop):
1237 1237 """utility function to push obsolete markers to a remote"""
1238 1238 if 'obsmarkers' in pushop.stepsdone:
1239 1239 return
1240 1240 repo = pushop.repo
1241 1241 remote = pushop.remote
1242 1242 pushop.stepsdone.add('obsmarkers')
1243 1243 if pushop.outobsmarkers:
1244 1244 pushop.ui.debug('try to push obsolete markers to remote\n')
1245 1245 rslts = []
1246 1246 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1247 1247 for key in sorted(remotedata, reverse=True):
1248 1248 # reverse sort to ensure we end with dump0
1249 1249 data = remotedata[key]
1250 1250 rslts.append(remote.pushkey('obsolete', key, '', data))
1251 1251 if [r for r in rslts if not r]:
1252 1252 msg = _('failed to push some obsolete markers!\n')
1253 1253 repo.ui.warn(msg)
1254 1254
1255 1255 def _pushbookmark(pushop):
1256 1256 """Update bookmark position on remote"""
1257 1257 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1258 1258 return
1259 1259 pushop.stepsdone.add('bookmarks')
1260 1260 ui = pushop.ui
1261 1261 remote = pushop.remote
1262 1262
1263 1263 for b, old, new in pushop.outbookmarks:
1264 1264 action = 'update'
1265 1265 if not old:
1266 1266 action = 'export'
1267 1267 elif not new:
1268 1268 action = 'delete'
1269 1269 if remote.pushkey('bookmarks', b, old, new):
1270 1270 ui.status(bookmsgmap[action][0] % b)
1271 1271 else:
1272 1272 ui.warn(bookmsgmap[action][1] % b)
1273 1273 # discovery can have set the value from an invalid entry
1274 1274 if pushop.bkresult is not None:
1275 1275 pushop.bkresult = 1
1276 1276
1277 1277 class pulloperation(object):
1278 1278 """A object that represent a single pull operation
1279 1279
1280 1280 It purpose is to carry pull related state and very common operation.
1281 1281
1282 1282 A new should be created at the beginning of each pull and discarded
1283 1283 afterward.
1284 1284 """
1285 1285
1286 1286 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1287 1287 remotebookmarks=None, streamclonerequested=None):
1288 1288 # repo we pull into
1289 1289 self.repo = repo
1290 1290 # repo we pull from
1291 1291 self.remote = remote
1292 1292 # revision we try to pull (None is "all")
1293 1293 self.heads = heads
1294 1294 # bookmark pulled explicitly
1295 1295 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1296 1296 for bookmark in bookmarks]
1297 1297 # do we force pull?
1298 1298 self.force = force
1299 1299 # whether a streaming clone was requested
1300 1300 self.streamclonerequested = streamclonerequested
1301 1301 # transaction manager
1302 1302 self.trmanager = None
1303 1303 # set of common changesets between local and remote before pull
1304 1304 self.common = None
1305 1305 # set of pulled heads
1306 1306 self.rheads = None
1307 1307 # list of missing changesets to fetch remotely
1308 1308 self.fetch = None
1309 1309 # remote bookmarks data
1310 1310 self.remotebookmarks = remotebookmarks
1311 1311 # result of changegroup pulling (used as return code by pull)
1312 1312 self.cgresult = None
1313 1313 # list of steps already done
1314 1314 self.stepsdone = set()
1315 1315 # Whether we attempted a clone from pre-generated bundles.
1316 1316 self.clonebundleattempted = False
1317 1317
1318 1318 @util.propertycache
1319 1319 def pulledsubset(self):
1320 1320 """heads of the set of changeset target by the pull"""
1321 1321 # compute target subset
1322 1322 if self.heads is None:
1323 1323 # We pulled everything possible
1324 1324 # sync on everything common
1325 1325 c = set(self.common)
1326 1326 ret = list(self.common)
1327 1327 for n in self.rheads:
1328 1328 if n not in c:
1329 1329 ret.append(n)
1330 1330 return ret
1331 1331 else:
1332 1332 # We pulled a specific subset
1333 1333 # sync on this subset
1334 1334 return self.heads
1335 1335
1336 1336 @util.propertycache
1337 1337 def canusebundle2(self):
1338 1338 return not _forcebundle1(self)
1339 1339
1340 1340 @util.propertycache
1341 1341 def remotebundle2caps(self):
1342 1342 return bundle2.bundle2caps(self.remote)
1343 1343
1344 1344 def gettransaction(self):
1345 1345 # deprecated; talk to trmanager directly
1346 1346 return self.trmanager.transaction()
1347 1347
1348 1348 class transactionmanager(util.transactional):
1349 1349 """An object to manage the life cycle of a transaction
1350 1350
1351 1351 It creates the transaction on demand and calls the appropriate hooks when
1352 1352 closing the transaction."""
1353 1353 def __init__(self, repo, source, url):
1354 1354 self.repo = repo
1355 1355 self.source = source
1356 1356 self.url = url
1357 1357 self._tr = None
1358 1358
1359 1359 def transaction(self):
1360 1360 """Return an open transaction object, constructing if necessary"""
1361 1361 if not self._tr:
1362 1362 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1363 1363 self._tr = self.repo.transaction(trname)
1364 1364 self._tr.hookargs['source'] = self.source
1365 1365 self._tr.hookargs['url'] = self.url
1366 1366 return self._tr
1367 1367
1368 1368 def close(self):
1369 1369 """close transaction if created"""
1370 1370 if self._tr is not None:
1371 1371 self._tr.close()
1372 1372
1373 1373 def release(self):
1374 1374 """release transaction if created"""
1375 1375 if self._tr is not None:
1376 1376 self._tr.release()
1377 1377
1378 1378 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1379 1379 streamclonerequested=None):
1380 1380 """Fetch repository data from a remote.
1381 1381
1382 1382 This is the main function used to retrieve data from a remote repository.
1383 1383
1384 1384 ``repo`` is the local repository to clone into.
1385 1385 ``remote`` is a peer instance.
1386 1386 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1387 1387 default) means to pull everything from the remote.
1388 1388 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1389 1389 default, all remote bookmarks are pulled.
1390 1390 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1391 1391 initialization.
1392 1392 ``streamclonerequested`` is a boolean indicating whether a "streaming
1393 1393 clone" is requested. A "streaming clone" is essentially a raw file copy
1394 1394 of revlogs from the server. This only works when the local repository is
1395 1395 empty. The default value of ``None`` means to respect the server
1396 1396 configuration for preferring stream clones.
1397 1397
1398 1398 Returns the ``pulloperation`` created for this pull.
1399 1399 """
1400 1400 if opargs is None:
1401 1401 opargs = {}
1402 1402 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1403 1403 streamclonerequested=streamclonerequested,
1404 1404 **pycompat.strkwargs(opargs))
1405 1405
1406 1406 peerlocal = pullop.remote.local()
1407 1407 if peerlocal:
1408 1408 missing = set(peerlocal.requirements) - pullop.repo.supported
1409 1409 if missing:
1410 1410 msg = _("required features are not"
1411 1411 " supported in the destination:"
1412 1412 " %s") % (', '.join(sorted(missing)))
1413 1413 raise error.Abort(msg)
1414 1414
1415 1415 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1416 1416 with repo.wlock(), repo.lock(), pullop.trmanager:
1417 1417 # This should ideally be in _pullbundle2(). However, it needs to run
1418 1418 # before discovery to avoid extra work.
1419 1419 _maybeapplyclonebundle(pullop)
1420 1420 streamclone.maybeperformlegacystreamclone(pullop)
1421 1421 _pulldiscovery(pullop)
1422 1422 if pullop.canusebundle2:
1423 1423 _pullbundle2(pullop)
1424 1424 _pullchangeset(pullop)
1425 1425 _pullphase(pullop)
1426 1426 _pullbookmarks(pullop)
1427 1427 _pullobsolete(pullop)
1428 1428
1429 1429 # storing remotenames
1430 1430 if repo.ui.configbool('experimental', 'remotenames'):
1431 1431 logexchange.pullremotenames(repo, remote)
1432 1432
1433 1433 return pullop
1434 1434
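# Illustrative sketch of a caller-side use of pull() (the peer URL is
# hypothetical; streamclonerequested=True asks for a streaming clone into an
# empty local repository, as described in the docstring above):
#
#   other = hg.peer(repo, {}, 'https://example.com/repo')
#   pullop = exchange.pull(repo, other, heads=None)
#   # pullop.cgresult carries the changegroup result used as pull's return code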
1435 1435 # list of steps to perform discovery before pull
1436 1436 pulldiscoveryorder = []
1437 1437
1438 1438 # Mapping between step name and function
1439 1439 #
1440 1440 # This exists to help extensions wrap steps if necessary
1441 1441 pulldiscoverymapping = {}
1442 1442
1443 1443 def pulldiscovery(stepname):
1444 1444 """decorator for function performing discovery before pull
1445 1445
1446 1446 The function is added to the step -> function mapping and appended to the
1447 1447 list of steps. Beware that decorated function will be added in order (this
1448 1448 may matter).
1449 1449
1450 1450 You can only use this decorator for a new step, if you want to wrap a step
1451 1451 from an extension, change the pulldiscovery dictionary directly."""
1452 1452 def dec(func):
1453 1453 assert stepname not in pulldiscoverymapping
1454 1454 pulldiscoverymapping[stepname] = func
1455 1455 pulldiscoveryorder.append(stepname)
1456 1456 return func
1457 1457 return dec
1458 1458
1459 1459 def _pulldiscovery(pullop):
1460 1460 """Run all discovery steps"""
1461 1461 for stepname in pulldiscoveryorder:
1462 1462 step = pulldiscoverymapping[stepname]
1463 1463 step(pullop)
1464 1464
1465 1465 @pulldiscovery('b1:bookmarks')
1466 1466 def _pullbookmarkbundle1(pullop):
1467 1467 """fetch bookmark data in bundle1 case
1468 1468
1469 1469 If not using bundle2, we have to fetch bookmarks before changeset
1470 1470 discovery to reduce the chance and impact of race conditions."""
1471 1471 if pullop.remotebookmarks is not None:
1472 1472 return
1473 1473 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1474 1474 # all known bundle2 servers now support listkeys, but let's be nice with
1475 1475 # new implementations.
1476 1476 return
1477 1477 books = pullop.remote.listkeys('bookmarks')
1478 1478 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1479 1479
1480 1480
1481 1481 @pulldiscovery('changegroup')
1482 1482 def _pulldiscoverychangegroup(pullop):
1483 1483 """discovery phase for the pull
1484 1484
1485 1485 Currently handles changeset discovery only; will change to handle all
1486 1486 discovery at some point."""
1487 1487 tmp = discovery.findcommonincoming(pullop.repo,
1488 1488 pullop.remote,
1489 1489 heads=pullop.heads,
1490 1490 force=pullop.force)
1491 1491 common, fetch, rheads = tmp
1492 1492 nm = pullop.repo.unfiltered().changelog.nodemap
1493 1493 if fetch and rheads:
1494 1494 # If a remote head is filtered locally, put it back in common.
1495 1495 #
1496 1496 # This is a hackish solution to catch most of the "common but locally
1497 1497 # hidden" situations. We do not perform discovery on the unfiltered
1498 1498 # repository because it ends up doing a pathological number of round
1499 1499 # trips for a huge number of changesets we do not care about.
1500 1500 #
1501 1501 # If a set of such "common but filtered" changesets exists on the server
1502 1502 # but is not included in the remote heads, we will not be able to detect it.
1503 1503 scommon = set(common)
1504 1504 for n in rheads:
1505 1505 if n in nm:
1506 1506 if n not in scommon:
1507 1507 common.append(n)
1508 1508 if set(rheads).issubset(set(common)):
1509 1509 fetch = []
1510 1510 pullop.common = common
1511 1511 pullop.fetch = fetch
1512 1512 pullop.rheads = rheads
1513 1513
1514 1514 def _pullbundle2(pullop):
1515 1515 """pull data using bundle2
1516 1516
1517 1517 For now, the only supported data is the changegroup."""
1518 1518 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1519 1519
1520 1520 # make ui easier to access
1521 1521 ui = pullop.repo.ui
1522 1522
1523 1523 # Determine whether a stream clone can be performed over bundle2; if so,
1524 1524 # the changegroup and phases steps below are replaced by a stream request.
1525 1525 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1526 1526
1527 1527 # declare the scope of the pull (common and requested heads)
1528 1528 kwargs['common'] = pullop.common
1529 1529 kwargs['heads'] = pullop.heads or pullop.rheads
1530 1530
1531 1531 if streaming:
1532 1532 kwargs['cg'] = False
1533 1533 kwargs['stream'] = True
1534 1534 pullop.stepsdone.add('changegroup')
1535 1535 pullop.stepsdone.add('phases')
1536 1536
1537 1537 else:
1538 1538 # pulling changegroup
1539 1539 pullop.stepsdone.add('changegroup')
1540 1540
1541 1541 kwargs['cg'] = pullop.fetch
1542 1542
1543 1543 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1544 1544 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1545 1545 if (not legacyphase and hasbinaryphase):
1546 1546 kwargs['phases'] = True
1547 1547 pullop.stepsdone.add('phases')
1548 1548
1549 1549 if 'listkeys' in pullop.remotebundle2caps:
1550 1550 if 'phases' not in pullop.stepsdone:
1551 1551 kwargs['listkeys'] = ['phases']
1552 1552
1553 1553 bookmarksrequested = False
1554 1554 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1555 1555 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1556 1556
1557 1557 if pullop.remotebookmarks is not None:
1558 1558 pullop.stepsdone.add('request-bookmarks')
1559 1559
1560 1560 if ('request-bookmarks' not in pullop.stepsdone
1561 1561 and pullop.remotebookmarks is None
1562 1562 and not legacybookmark and hasbinarybook):
1563 1563 kwargs['bookmarks'] = True
1564 1564 bookmarksrequested = True
1565 1565
1566 1566 if 'listkeys' in pullop.remotebundle2caps:
1567 1567 if 'request-bookmarks' not in pullop.stepsdone:
1568 1568 # make sure to always include bookmark data when migrating
1569 1569 # `hg incoming --bundle` to using this function.
1570 1570 pullop.stepsdone.add('request-bookmarks')
1571 1571 kwargs.setdefault('listkeys', []).append('bookmarks')
1572 1572
1573 1573 # If this is a full pull / clone and the server supports the clone bundles
1574 1574 # feature, tell the server whether we attempted a clone bundle. The
1575 1575 # presence of this flag indicates the client supports clone bundles. This
1576 1576 # will enable the server to treat clients that support clone bundles
1577 1577 # differently from those that don't.
1578 1578 if (pullop.remote.capable('clonebundles')
1579 1579 and pullop.heads is None and list(pullop.common) == [nullid]):
1580 1580 kwargs['cbattempted'] = pullop.clonebundleattempted
1581 1581
1582 1582 if streaming:
1583 1583 pullop.repo.ui.status(_('streaming all changes\n'))
1584 1584 elif not pullop.fetch:
1585 1585 pullop.repo.ui.status(_("no changes found\n"))
1586 1586 pullop.cgresult = 0
1587 1587 else:
1588 1588 if pullop.heads is None and list(pullop.common) == [nullid]:
1589 1589 pullop.repo.ui.status(_("requesting all changes\n"))
1590 1590 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1591 1591 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1592 1592 if obsolete.commonversion(remoteversions) is not None:
1593 1593 kwargs['obsmarkers'] = True
1594 1594 pullop.stepsdone.add('obsmarkers')
1595 1595 _pullbundle2extraprepare(pullop, kwargs)
1596 1596 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1597 1597 try:
1598 1598 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction)
1599 1599 op.modes['bookmarks'] = 'records'
1600 1600 bundle2.processbundle(pullop.repo, bundle, op=op)
1601 1601 except bundle2.AbortFromPart as exc:
1602 1602 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1603 1603 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1604 1604 except error.BundleValueError as exc:
1605 1605 raise error.Abort(_('missing support for %s') % exc)
1606 1606
1607 1607 if pullop.fetch:
1608 1608 pullop.cgresult = bundle2.combinechangegroupresults(op)
1609 1609
1610 1610 # processing phases change
1611 1611 for namespace, value in op.records['listkeys']:
1612 1612 if namespace == 'phases':
1613 1613 _pullapplyphases(pullop, value)
1614 1614
1615 1615 # processing bookmark update
1616 1616 if bookmarksrequested:
1617 1617 books = {}
1618 1618 for record in op.records['bookmarks']:
1619 1619 books[record['bookmark']] = record["node"]
1620 1620 pullop.remotebookmarks = books
1621 1621 else:
1622 1622 for namespace, value in op.records['listkeys']:
1623 1623 if namespace == 'bookmarks':
1624 1624 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1625 1625
1626 1626 # bookmark data were either already there or pulled in the bundle
1627 1627 if pullop.remotebookmarks is not None:
1628 1628 _pullbookmarks(pullop)
1629 1629
1630 1630 def _pullbundle2extraprepare(pullop, kwargs):
1631 1631 """hook function so that extensions can extend the getbundle call"""
1632 1632
1633 1633 def _pullchangeset(pullop):
1634 1634 """pull changeset from unbundle into the local repo"""
1635 1635 # We delay opening the transaction as long as possible so we
1636 1636 # don't open a transaction for nothing and don't break future useful
1637 1637 # rollback calls
1638 1638 if 'changegroup' in pullop.stepsdone:
1639 1639 return
1640 1640 pullop.stepsdone.add('changegroup')
1641 1641 if not pullop.fetch:
1642 1642 pullop.repo.ui.status(_("no changes found\n"))
1643 1643 pullop.cgresult = 0
1644 1644 return
1645 1645 tr = pullop.gettransaction()
1646 1646 if pullop.heads is None and list(pullop.common) == [nullid]:
1647 1647 pullop.repo.ui.status(_("requesting all changes\n"))
1648 1648 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1649 1649 # issue1320, avoid a race if remote changed after discovery
1650 1650 pullop.heads = pullop.rheads
1651 1651
1652 1652 if pullop.remote.capable('getbundle'):
1653 1653 # TODO: get bundlecaps from remote
1654 1654 cg = pullop.remote.getbundle('pull', common=pullop.common,
1655 1655 heads=pullop.heads or pullop.rheads)
1656 1656 elif pullop.heads is None:
1657 1657 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1658 1658 elif not pullop.remote.capable('changegroupsubset'):
1659 1659 raise error.Abort(_("partial pull cannot be done because "
1660 1660 "other repository doesn't support "
1661 1661 "changegroupsubset."))
1662 1662 else:
1663 1663 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1664 1664 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1665 1665 pullop.remote.url())
1666 1666 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1667 1667
1668 1668 def _pullphase(pullop):
1669 1669 # Get remote phases data from remote
1670 1670 if 'phases' in pullop.stepsdone:
1671 1671 return
1672 1672 remotephases = pullop.remote.listkeys('phases')
1673 1673 _pullapplyphases(pullop, remotephases)
1674 1674
1675 1675 def _pullapplyphases(pullop, remotephases):
1676 1676 """apply phase movement from observed remote state"""
1677 1677 if 'phases' in pullop.stepsdone:
1678 1678 return
1679 1679 pullop.stepsdone.add('phases')
1680 1680 publishing = bool(remotephases.get('publishing', False))
1681 1681 if remotephases and not publishing:
1682 1682 # remote is new and non-publishing
1683 1683 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1684 1684 pullop.pulledsubset,
1685 1685 remotephases)
1686 1686 dheads = pullop.pulledsubset
1687 1687 else:
1688 1688 # Remote is old or publishing; all common changesets
1689 1689 # should be seen as public
1690 1690 pheads = pullop.pulledsubset
1691 1691 dheads = []
1692 1692 unfi = pullop.repo.unfiltered()
1693 1693 phase = unfi._phasecache.phase
1694 1694 rev = unfi.changelog.nodemap.get
1695 1695 public = phases.public
1696 1696 draft = phases.draft
1697 1697
1698 1698 # exclude changesets already public locally and update the others
1699 1699 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1700 1700 if pheads:
1701 1701 tr = pullop.gettransaction()
1702 1702 phases.advanceboundary(pullop.repo, tr, public, pheads)
1703 1703
1704 1704 # exclude changesets already draft locally and update the others
1705 1705 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1706 1706 if dheads:
1707 1707 tr = pullop.gettransaction()
1708 1708 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1709 1709
1710 1710 def _pullbookmarks(pullop):
1711 1711 """process the remote bookmark information to update the local one"""
1712 1712 if 'bookmarks' in pullop.stepsdone:
1713 1713 return
1714 1714 pullop.stepsdone.add('bookmarks')
1715 1715 repo = pullop.repo
1716 1716 remotebookmarks = pullop.remotebookmarks
1717 1717 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1718 1718 pullop.remote.url(),
1719 1719 pullop.gettransaction,
1720 1720 explicit=pullop.explicitbookmarks)
1721 1721
1722 1722 def _pullobsolete(pullop):
1723 1723 """utility function to pull obsolete markers from a remote
1724 1724
1725 1725 The `gettransaction` is a function that returns the pull transaction,
1726 1726 creating one if necessary. We return the transaction to inform the calling
1727 1727 code that a new transaction has been created (when applicable).
1728 1728
1729 1729 Exists mostly to allow overriding for experimentation purposes"""
1730 1730 if 'obsmarkers' in pullop.stepsdone:
1731 1731 return
1732 1732 pullop.stepsdone.add('obsmarkers')
1733 1733 tr = None
1734 1734 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1735 1735 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1736 1736 remoteobs = pullop.remote.listkeys('obsolete')
1737 1737 if 'dump0' in remoteobs:
1738 1738 tr = pullop.gettransaction()
1739 1739 markers = []
1740 1740 for key in sorted(remoteobs, reverse=True):
1741 1741 if key.startswith('dump'):
1742 1742 data = util.b85decode(remoteobs[key])
1743 1743 version, newmarks = obsolete._readmarkers(data)
1744 1744 markers += newmarks
1745 1745 if markers:
1746 1746 pullop.repo.obsstore.add(tr, markers)
1747 1747 pullop.repo.invalidatevolatilesets()
1748 1748 return tr
1749 1749
1750 1750 def caps20to10(repo, role):
1751 1751 """return a set with appropriate options to use bundle20 during getbundle"""
1752 1752 caps = {'HG20'}
1753 1753 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
1754 1754 caps.add('bundle2=' + urlreq.quote(capsblob))
1755 1755 return caps
1756 1756
1757 1757 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1758 1758 getbundle2partsorder = []
1759 1759
1760 1760 # Mapping between step name and function
1761 1761 #
1762 1762 # This exists to help extensions wrap steps if necessary
1763 1763 getbundle2partsmapping = {}
1764 1764
1765 1765 def getbundle2partsgenerator(stepname, idx=None):
1766 1766 """decorator for function generating bundle2 part for getbundle
1767 1767
1768 1768 The function is added to the step -> function mapping and appended to the
1769 1769 list of steps. Beware that decorated functions will be added in order
1770 1770 (this may matter).
1771 1771
1772 1772 You can only use this decorator for new steps; if you want to wrap a step
1773 1773 from an extension, change the getbundle2partsmapping dictionary directly."""
1774 1774 def dec(func):
1775 1775 assert stepname not in getbundle2partsmapping
1776 1776 getbundle2partsmapping[stepname] = func
1777 1777 if idx is None:
1778 1778 getbundle2partsorder.append(stepname)
1779 1779 else:
1780 1780 getbundle2partsorder.insert(idx, stepname)
1781 1781 return func
1782 1782 return dec
1783 1783
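Purely as a sketch, an extension could hook an additional part into getbundle through this decorator; the part name and payload below are invented and only emitted when the client advertises support:

    @getbundle2partsgenerator('extension-data')
    def _getbundleextensiondata(bundler, repo, source, bundlecaps=None,
                                b2caps=None, **kwargs):
        """emit a hypothetical extra part (illustration only)"""
        if 'extension-data' not in b2caps:
            return
        bundler.newpart('extension-data', data='payload', mandatory=False)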
1784 1784 def bundle2requested(bundlecaps):
1785 1785 if bundlecaps is not None:
1786 1786 return any(cap.startswith('HG2') for cap in bundlecaps)
1787 1787 return False
1788 1788
1789 1789 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1790 1790 **kwargs):
1791 1791 """Return chunks constituting a bundle's raw data.
1792 1792
1793 1793 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1794 1794 passed.
1795 1795
1796 1796 Returns a 2-tuple of a dict with metadata about the generated bundle
1797 1797 and an iterator over raw chunks (of varying sizes).
1798 1798 """
1799 1799 kwargs = pycompat.byteskwargs(kwargs)
1800 1800 info = {}
1801 1801 usebundle2 = bundle2requested(bundlecaps)
1802 1802 # bundle10 case
1803 1803 if not usebundle2:
1804 1804 if bundlecaps and not kwargs.get('cg', True):
1805 1805 raise ValueError(_('request for bundle10 must include changegroup'))
1806 1806
1807 1807 if kwargs:
1808 1808 raise ValueError(_('unsupported getbundle arguments: %s')
1809 1809 % ', '.join(sorted(kwargs.keys())))
1810 1810 outgoing = _computeoutgoing(repo, heads, common)
1811 1811 info['bundleversion'] = 1
1812 1812 return info, changegroup.makestream(repo, outgoing, '01', source,
1813 1813 bundlecaps=bundlecaps)
1814 1814
1815 1815 # bundle20 case
1816 1816 info['bundleversion'] = 2
1817 1817 b2caps = {}
1818 1818 for bcaps in bundlecaps:
1819 1819 if bcaps.startswith('bundle2='):
1820 1820 blob = urlreq.unquote(bcaps[len('bundle2='):])
1821 1821 b2caps.update(bundle2.decodecaps(blob))
1822 1822 bundler = bundle2.bundle20(repo.ui, b2caps)
1823 1823
1824 1824 kwargs['heads'] = heads
1825 1825 kwargs['common'] = common
1826 1826
1827 1827 for name in getbundle2partsorder:
1828 1828 func = getbundle2partsmapping[name]
1829 1829 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1830 1830 **pycompat.strkwargs(kwargs))
1831 1831
1832 1832 info['prefercompressed'] = bundler.prefercompressed
1833 1833
1834 1834 return info, bundler.getchunks()
1835 1835
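A hedged sketch of driving getbundlechunks() directly, for instance to write a full bundle2 stream to disk; the helper name and file handling are illustrative, and the bundlecaps value simply reuses caps20to10() defined earlier:

    def writebundletofile(repo, path):
        # request everything: common is only the null revision
        caps = caps20to10(repo, role='client')
        info, chunks = getbundlechunks(repo, 'bundle', heads=repo.heads(),
                                       common=[nullid], bundlecaps=caps)
        with open(path, 'wb') as fh:
            for chunk in chunks:
                fh.write(chunk)
        return info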
1836 1836 @getbundle2partsgenerator('stream2')
1837 1837 def _getbundlestream2(bundler, repo, *args, **kwargs):
1838 1838 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
1839 1839
1840 1840 @getbundle2partsgenerator('changegroup')
1841 1841 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1842 1842 b2caps=None, heads=None, common=None, **kwargs):
1843 1843 """add a changegroup part to the requested bundle"""
1844 1844 cgstream = None
1845 1845 if kwargs.get(r'cg', True):
1846 1846 # build changegroup bundle here.
1847 1847 version = '01'
1848 1848 cgversions = b2caps.get('changegroup')
1849 1849 if cgversions: # 3.1 and 3.2 ship with an empty value
1850 1850 cgversions = [v for v in cgversions
1851 1851 if v in changegroup.supportedoutgoingversions(repo)]
1852 1852 if not cgversions:
1853 1853 raise ValueError(_('no common changegroup version'))
1854 1854 version = max(cgversions)
1855 1855 outgoing = _computeoutgoing(repo, heads, common)
1856 1856 if outgoing.missing:
1857 1857 cgstream = changegroup.makestream(repo, outgoing, version, source,
1858 1858 bundlecaps=bundlecaps)
1859 1859
1860 1860 if cgstream:
1861 1861 part = bundler.newpart('changegroup', data=cgstream)
1862 1862 if cgversions:
1863 1863 part.addparam('version', version)
1864 1864 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1865 1865 mandatory=False)
1866 1866 if 'treemanifest' in repo.requirements:
1867 1867 part.addparam('treemanifest', '1')
1868 1868
1869 1869 @getbundle2partsgenerator('bookmarks')
1870 1870 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1871 1871 b2caps=None, **kwargs):
1872 1872 """add a bookmark part to the requested bundle"""
1873 1873 if not kwargs.get(r'bookmarks', False):
1874 1874 return
1875 1875 if 'bookmarks' not in b2caps:
1876 1876 raise ValueError(_('no common bookmarks exchange method'))
1877 1877 books = bookmod.listbinbookmarks(repo)
1878 1878 data = bookmod.binaryencode(books)
1879 1879 if data:
1880 1880 bundler.newpart('bookmarks', data=data)
1881 1881
1882 1882 @getbundle2partsgenerator('listkeys')
1883 1883 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1884 1884 b2caps=None, **kwargs):
1885 1885 """add parts containing listkeys namespaces to the requested bundle"""
1886 1886 listkeys = kwargs.get(r'listkeys', ())
1887 1887 for namespace in listkeys:
1888 1888 part = bundler.newpart('listkeys')
1889 1889 part.addparam('namespace', namespace)
1890 1890 keys = repo.listkeys(namespace).items()
1891 1891 part.data = pushkey.encodekeys(keys)
1892 1892
1893 1893 @getbundle2partsgenerator('obsmarkers')
1894 1894 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1895 1895 b2caps=None, heads=None, **kwargs):
1896 1896 """add an obsolescence markers part to the requested bundle"""
1897 1897 if kwargs.get(r'obsmarkers', False):
1898 1898 if heads is None:
1899 1899 heads = repo.heads()
1900 1900 subset = [c.node() for c in repo.set('::%ln', heads)]
1901 1901 markers = repo.obsstore.relevantmarkers(subset)
1902 1902 markers = sorted(markers)
1903 1903 bundle2.buildobsmarkerspart(bundler, markers)
1904 1904
1905 1905 @getbundle2partsgenerator('phases')
1906 1906 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1907 1907 b2caps=None, heads=None, **kwargs):
1908 1908 """add phase heads part to the requested bundle"""
1909 1909 if kwargs.get(r'phases', False):
1910 1910 if not 'heads' in b2caps.get('phases'):
1911 1911 raise ValueError(_('no common phases exchange method'))
1912 1912 if heads is None:
1913 1913 heads = repo.heads()
1914 1914
1915 1915 headsbyphase = collections.defaultdict(set)
1916 1916 if repo.publishing():
1917 1917 headsbyphase[phases.public] = heads
1918 1918 else:
1919 1919 # find the appropriate heads to move
1920 1920
1921 1921 phase = repo._phasecache.phase
1922 1922 node = repo.changelog.node
1923 1923 rev = repo.changelog.rev
1924 1924 for h in heads:
1925 1925 headsbyphase[phase(repo, rev(h))].add(h)
1926 1926 seenphases = list(headsbyphase.keys())
1927 1927
1928 1928 # We do not handle anything but public and draft phases for now
1929 1929 if seenphases:
1930 1930 assert max(seenphases) <= phases.draft
1931 1931
1932 1932 # if client is pulling non-public changesets, we need to find
1933 1933 # intermediate public heads.
1934 1934 draftheads = headsbyphase.get(phases.draft, set())
1935 1935 if draftheads:
1936 1936 publicheads = headsbyphase.get(phases.public, set())
1937 1937
1938 1938 revset = 'heads(only(%ln, %ln) and public())'
1939 1939 extraheads = repo.revs(revset, draftheads, publicheads)
1940 1940 for r in extraheads:
1941 1941 headsbyphase[phases.public].add(node(r))
1942 1942
1943 1943 # transform data in a format used by the encoding function
1944 1944 phasemapping = []
1945 1945 for phase in phases.allphases:
1946 1946 phasemapping.append(sorted(headsbyphase[phase]))
1947 1947
1948 1948 # generate the actual part
1949 1949 phasedata = phases.binaryencode(phasemapping)
1950 1950 bundler.newpart('phase-heads', data=phasedata)
1951 1951
1952 1952 @getbundle2partsgenerator('hgtagsfnodes')
1953 1953 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1954 1954 b2caps=None, heads=None, common=None,
1955 1955 **kwargs):
1956 1956 """Transfer the .hgtags filenodes mapping.
1957 1957
1958 1958 Only values for heads in this bundle will be transferred.
1959 1959
1960 1960 The part data consists of pairs of 20-byte changeset nodes and raw
1961 1961 .hgtags filenode values.
1962 1962 """
1963 1963 # Don't send unless:
1964 1964 # - changesets are being exchanged,
1965 1965 # - the client supports it.
1966 1966 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
1967 1967 return
1968 1968
1969 1969 outgoing = _computeoutgoing(repo, heads, common)
1970 1970 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1971 1971
1972 1972 @getbundle2partsgenerator('cache:rev-branch-cache')
1973 1973 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
1974 1974 b2caps=None, heads=None, common=None,
1975 1975 **kwargs):
1976 1976 """Transfer the rev-branch-cache mapping
1977 1977
1978 1978 The payload is a series of data related to each branch
1979 1979
1980 1980 1) branch name length
1981 1981 2) number of open heads
1982 1982 3) number of closed heads
1983 1983 4) open heads nodes
1984 1984 5) closed heads nodes
1985 1985 """
1986 1986 # Don't send unless:
1987 1987 # - changesets are being exchanged,
1988 1988 # - the client supports it.
1989 1989 if not (kwargs.get(r'cg', True)) or 'rev-branch-cache' not in b2caps:
1990 1990 return
1991 1991 outgoing = _computeoutgoing(repo, heads, common)
1992 1992 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
1993 1993
1994 1994 def check_heads(repo, their_heads, context):
1995 1995 """check if the heads of a repo have been modified
1996 1996
1997 1997 Used by peer for unbundling.
1998 1998 """
1999 1999 heads = repo.heads()
2000 2000 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
2001 2001 if not (their_heads == ['force'] or their_heads == heads or
2002 2002 their_heads == ['hashed', heads_hash]):
2003 2003 # someone else committed/pushed/unbundled while we
2004 2004 # were transferring data
2005 2005 raise error.PushRaced('repository changed while %s - '
2006 2006 'please try again' % context)
2007 2007
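For illustration, the 'hashed' form accepted by check_heads() above can be produced on the client side as in the sketch below, which mirrors the digest computed by the server; the helper name is made up:

    def hashedheadstoken(observedheads):
        # observedheads: binary node ids of the remote heads seen at
        # discovery time; the server recomputes the same sha1 over its
        # current heads and raises PushRaced if they no longer match
        digest = hashlib.sha1(''.join(sorted(observedheads))).digest()
        return ['hashed', digest]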
2008 2008 def unbundle(repo, cg, heads, source, url):
2009 2009 """Apply a bundle to a repo.
2010 2010
2011 2011 This function makes sure the repo is locked during the application and has
2012 2012 a mechanism to check that no push race occurred between the creation of the
2013 2013 bundle and its application.
2014 2014
2015 2015 If the push was raced, a PushRaced exception is raised."""
2016 2016 r = 0
2017 2017 # need a transaction when processing a bundle2 stream
2018 2018 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2019 2019 lockandtr = [None, None, None]
2020 2020 recordout = None
2021 2021 # quick fix for output mismatch with bundle2 in 3.4
2022 2022 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2023 2023 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2024 2024 captureoutput = True
2025 2025 try:
2026 2026 # note: outside bundle1, 'heads' is expected to be empty and this
2027 2027 # 'check_heads' call will be a no-op
2028 2028 check_heads(repo, heads, 'uploading changes')
2029 2029 # push can proceed
2030 2030 if not isinstance(cg, bundle2.unbundle20):
2031 2031 # legacy case: bundle1 (changegroup 01)
2032 2032 txnname = "\n".join([source, util.hidepassword(url)])
2033 2033 with repo.lock(), repo.transaction(txnname) as tr:
2034 2034 op = bundle2.applybundle(repo, cg, tr, source, url)
2035 2035 r = bundle2.combinechangegroupresults(op)
2036 2036 else:
2037 2037 r = None
2038 2038 try:
2039 2039 def gettransaction():
2040 2040 if not lockandtr[2]:
2041 2041 lockandtr[0] = repo.wlock()
2042 2042 lockandtr[1] = repo.lock()
2043 2043 lockandtr[2] = repo.transaction(source)
2044 2044 lockandtr[2].hookargs['source'] = source
2045 2045 lockandtr[2].hookargs['url'] = url
2046 2046 lockandtr[2].hookargs['bundle2'] = '1'
2047 2047 return lockandtr[2]
2048 2048
2049 2049 # Do greedy locking by default until we're satisfied with lazy
2050 2050 # locking.
2051 2051 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2052 2052 gettransaction()
2053 2053
2054 2054 op = bundle2.bundleoperation(repo, gettransaction,
2055 2055 captureoutput=captureoutput)
2056 2056 try:
2057 2057 op = bundle2.processbundle(repo, cg, op=op)
2058 2058 finally:
2059 2059 r = op.reply
2060 2060 if captureoutput and r is not None:
2061 2061 repo.ui.pushbuffer(error=True, subproc=True)
2062 2062 def recordout(output):
2063 2063 r.newpart('output', data=output, mandatory=False)
2064 2064 if lockandtr[2] is not None:
2065 2065 lockandtr[2].close()
2066 2066 except BaseException as exc:
2067 2067 exc.duringunbundle2 = True
2068 2068 if captureoutput and r is not None:
2069 2069 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2070 2070 def recordout(output):
2071 2071 part = bundle2.bundlepart('output', data=output,
2072 2072 mandatory=False)
2073 2073 parts.append(part)
2074 2074 raise
2075 2075 finally:
2076 2076 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2077 2077 if recordout is not None:
2078 2078 recordout(repo.ui.popbuffer())
2079 2079 return r
2080 2080
2081 2081 def _maybeapplyclonebundle(pullop):
2082 2082 """Apply a clone bundle from a remote, if possible."""
2083 2083
2084 2084 repo = pullop.repo
2085 2085 remote = pullop.remote
2086 2086
2087 2087 if not repo.ui.configbool('ui', 'clonebundles'):
2088 2088 return
2089 2089
2090 2090 # Only run if local repo is empty.
2091 2091 if len(repo):
2092 2092 return
2093 2093
2094 2094 if pullop.heads:
2095 2095 return
2096 2096
2097 2097 if not remote.capable('clonebundles'):
2098 2098 return
2099 2099
2100 2100 res = remote._call('clonebundles')
2101 2101
2102 2102 # If we call the wire protocol command, that's good enough to record the
2103 2103 # attempt.
2104 2104 pullop.clonebundleattempted = True
2105 2105
2106 2106 entries = parseclonebundlesmanifest(repo, res)
2107 2107 if not entries:
2108 2108 repo.ui.note(_('no clone bundles available on remote; '
2109 2109 'falling back to regular clone\n'))
2110 2110 return
2111 2111
2112 2112 entries = filterclonebundleentries(
2113 2113 repo, entries, streamclonerequested=pullop.streamclonerequested)
2114 2114
2115 2115 if not entries:
2116 2116 # There is a thundering herd concern here. However, if a server
2117 2117 # operator doesn't advertise bundles appropriate for its clients,
2118 2118 # they deserve what's coming. Furthermore, from a client's
2119 2119 # perspective, no automatic fallback would mean not being able to
2120 2120 # clone!
2121 2121 repo.ui.warn(_('no compatible clone bundles available on server; '
2122 2122 'falling back to regular clone\n'))
2123 2123 repo.ui.warn(_('(you may want to report this to the server '
2124 2124 'operator)\n'))
2125 2125 return
2126 2126
2127 2127 entries = sortclonebundleentries(repo.ui, entries)
2128 2128
2129 2129 url = entries[0]['URL']
2130 2130 repo.ui.status(_('applying clone bundle from %s\n') % url)
2131 2131 if trypullbundlefromurl(repo.ui, repo, url):
2132 2132 repo.ui.status(_('finished applying clone bundle\n'))
2133 2133 # Bundle failed.
2134 2134 #
2135 2135 # We abort by default to avoid the thundering herd of
2136 2136 # clients flooding a server that was expecting expensive
2137 2137 # clone load to be offloaded.
2138 2138 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2139 2139 repo.ui.warn(_('falling back to normal clone\n'))
2140 2140 else:
2141 2141 raise error.Abort(_('error applying bundle'),
2142 2142 hint=_('if this error persists, consider contacting '
2143 2143 'the server operator or disable clone '
2144 2144 'bundles via '
2145 2145 '"--config ui.clonebundles=false"'))
2146 2146
2147 2147 def parseclonebundlesmanifest(repo, s):
2148 2148 """Parses the raw text of a clone bundles manifest.
2149 2149
2150 2150 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2151 2151 to the URL and other keys are the attributes for the entry.
2152 2152 """
2153 2153 m = []
2154 2154 for line in s.splitlines():
2155 2155 fields = line.split()
2156 2156 if not fields:
2157 2157 continue
2158 2158 attrs = {'URL': fields[0]}
2159 2159 for rawattr in fields[1:]:
2160 2160 key, value = rawattr.split('=', 1)
2161 2161 key = urlreq.unquote(key)
2162 2162 value = urlreq.unquote(value)
2163 2163 attrs[key] = value
2164 2164
2165 2165 # Parse BUNDLESPEC into components. This makes client-side
2166 2166 # preferences easier to specify since you can prefer a single
2167 2167 # component of the BUNDLESPEC.
2168 2168 if key == 'BUNDLESPEC':
2169 2169 try:
2170 2170 bundlespec = parsebundlespec(repo, value,
2171 2171 externalnames=True)
2172 2172 attrs['COMPRESSION'] = bundlespec.compression
2173 2173 attrs['VERSION'] = bundlespec.version
2174 2174 except error.InvalidBundleSpecification:
2175 2175 pass
2176 2176 except error.UnsupportedBundleSpecification:
2177 2177 pass
2178 2178
2179 2179 m.append(attrs)
2180 2180
2181 2181 return m
2182 2182
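A small illustration of the manifest format handled above; the URLs are placeholders and the expected output is approximate:

    manifesttext = (
        'https://example.com/full.hg BUNDLESPEC=gzip-v2\n'
        'https://example.com/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1\n'
    )
    # parseclonebundlesmanifest(repo, manifesttext) would return roughly:
    # [{'URL': 'https://example.com/full.hg',
    #   'BUNDLESPEC': 'gzip-v2', 'COMPRESSION': 'gzip', 'VERSION': 'v2'},
    #  {'URL': 'https://example.com/packed.hg',
    #   'BUNDLESPEC': 'none-packed1;requirements=revlogv1',
    #   'COMPRESSION': 'none', 'VERSION': 'packed1'}]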
2183 def isstreamclonespec(bundlespec):
2184 # Stream clone v1
2185 if (bundlespec.compression == 'UN' and bundlespec.version == 's1'):
2186 return True
2187
2188 # Stream clone v2
2189 if (bundlespec.compression == 'UN' and bundlespec.version == '02' and
2190 bundlespec.contentopts.get('streamv2')):
2191 return True
2192
2193 return False
2194
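To make the new check concrete, here is a hypothetical helper (not part of the module) showing which spec strings it would accept, using the same parsebundlespec() call that filterclonebundleentries() performs below:

    def wouldbestreamclone(repo, spec):
        # hypothetical convenience wrapper, for illustration only
        return isstreamclonespec(parsebundlespec(repo, spec, strict=True))

    # wouldbestreamclone(repo, 'none-packed1;requirements%3Drevlogv1') -> True   (stream v1)
    # wouldbestreamclone(repo, 'none-v2;stream=v2')                    -> True   (stream v2)
    # wouldbestreamclone(repo, 'gzip-v2')                              -> False  (regular bundle)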
2183 2195 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2184 2196 """Remove incompatible clone bundle manifest entries.
2185 2197
2186 2198 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2187 2199 and returns a new list consisting of only the entries that this client
2188 2200 should be able to apply.
2189 2201
2190 2202 There is no guarantee we'll be able to apply all returned entries because
2191 2203 the metadata we use to filter on may be missing or wrong.
2192 2204 """
2193 2205 newentries = []
2194 2206 for entry in entries:
2195 2207 spec = entry.get('BUNDLESPEC')
2196 2208 if spec:
2197 2209 try:
2198 2210 bundlespec = parsebundlespec(repo, spec, strict=True)
2199 2211
2200 2212 # If a stream clone was requested, filter out non-streamclone
2201 2213 # entries.
2202 comp = bundlespec.compression
2203 version = bundlespec.version
2204 if streamclonerequested and (comp != 'UN' or version != 's1'):
2214 if streamclonerequested and not isstreamclonespec(bundlespec):
2205 2215 repo.ui.debug('filtering %s because not a stream clone\n' %
2206 2216 entry['URL'])
2207 2217 continue
2208 2218
2209 2219 except error.InvalidBundleSpecification as e:
2210 2220 repo.ui.debug(str(e) + '\n')
2211 2221 continue
2212 2222 except error.UnsupportedBundleSpecification as e:
2213 2223 repo.ui.debug('filtering %s because unsupported bundle '
2214 2224 'spec: %s\n' % (
2215 2225 entry['URL'], stringutil.forcebytestr(e)))
2216 2226 continue
2217 2227 # If we don't have a spec and requested a stream clone, we don't know
2218 2228 # what the entry is so don't attempt to apply it.
2219 2229 elif streamclonerequested:
2220 2230 repo.ui.debug('filtering %s because cannot determine if a stream '
2221 2231 'clone bundle\n' % entry['URL'])
2222 2232 continue
2223 2233
2224 2234 if 'REQUIRESNI' in entry and not sslutil.hassni:
2225 2235 repo.ui.debug('filtering %s because SNI not supported\n' %
2226 2236 entry['URL'])
2227 2237 continue
2228 2238
2229 2239 newentries.append(entry)
2230 2240
2231 2241 return newentries
2232 2242
2233 2243 class clonebundleentry(object):
2234 2244 """Represents an item in a clone bundles manifest.
2235 2245
2236 2246 This rich class is needed to support sorting since sorted() in Python 3
2237 2247 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2238 2248 won't work.
2239 2249 """
2240 2250
2241 2251 def __init__(self, value, prefers):
2242 2252 self.value = value
2243 2253 self.prefers = prefers
2244 2254
2245 2255 def _cmp(self, other):
2246 2256 for prefkey, prefvalue in self.prefers:
2247 2257 avalue = self.value.get(prefkey)
2248 2258 bvalue = other.value.get(prefkey)
2249 2259
2250 2260 # Special case for b missing attribute and a matches exactly.
2251 2261 if avalue is not None and bvalue is None and avalue == prefvalue:
2252 2262 return -1
2253 2263
2254 2264 # Special case for a missing attribute and b matches exactly.
2255 2265 if bvalue is not None and avalue is None and bvalue == prefvalue:
2256 2266 return 1
2257 2267
2258 2268 # We can't compare unless attribute present on both.
2259 2269 if avalue is None or bvalue is None:
2260 2270 continue
2261 2271
2262 2272 # Same values should fall back to next attribute.
2263 2273 if avalue == bvalue:
2264 2274 continue
2265 2275
2266 2276 # Exact matches come first.
2267 2277 if avalue == prefvalue:
2268 2278 return -1
2269 2279 if bvalue == prefvalue:
2270 2280 return 1
2271 2281
2272 2282 # Fall back to next attribute.
2273 2283 continue
2274 2284
2275 2285 # If we got here we couldn't sort by attributes and prefers. Fall
2276 2286 # back to index order.
2277 2287 return 0
2278 2288
2279 2289 def __lt__(self, other):
2280 2290 return self._cmp(other) < 0
2281 2291
2282 2292 def __gt__(self, other):
2283 2293 return self._cmp(other) > 0
2284 2294
2285 2295 def __eq__(self, other):
2286 2296 return self._cmp(other) == 0
2287 2297
2288 2298 def __le__(self, other):
2289 2299 return self._cmp(other) <= 0
2290 2300
2291 2301 def __ge__(self, other):
2292 2302 return self._cmp(other) >= 0
2293 2303
2294 2304 def __ne__(self, other):
2295 2305 return self._cmp(other) != 0
2296 2306
2297 2307 def sortclonebundleentries(ui, entries):
2298 2308 prefers = ui.configlist('ui', 'clonebundleprefers')
2299 2309 if not prefers:
2300 2310 return list(entries)
2301 2311
2302 2312 prefers = [p.split('=', 1) for p in prefers]
2303 2313
2304 2314 items = sorted(clonebundleentry(v, prefers) for v in entries)
2305 2315 return [i.value for i in items]
2306 2316
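As a rough illustration of the preference sorting above (the entry URLs and the preference value are made up):

    entries = [
        {'URL': 'https://example.com/bzip2.hg', 'COMPRESSION': 'bzip2'},
        {'URL': 'https://example.com/gzip.hg', 'COMPRESSION': 'gzip'},
    ]
    # with "clonebundleprefers = COMPRESSION=gzip" in the client's [ui]
    # section, the gzip entry sorts first; entries lacking the preferred
    # attribute keep their manifest order:
    # sortclonebundleentries(ui, entries)[0]['URL'] == 'https://example.com/gzip.hg'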
2307 2317 def trypullbundlefromurl(ui, repo, url):
2308 2318 """Attempt to apply a bundle from a URL."""
2309 2319 with repo.lock(), repo.transaction('bundleurl') as tr:
2310 2320 try:
2311 2321 fh = urlmod.open(ui, url)
2312 2322 cg = readbundle(ui, fh, 'stream')
2313 2323
2314 2324 if isinstance(cg, streamclone.streamcloneapplier):
2315 2325 cg.apply(repo)
2316 2326 else:
2317 2327 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2318 2328 return True
2319 2329 except urlerr.httperror as e:
2320 2330 ui.warn(_('HTTP error fetching bundle: %s\n') %
2321 2331 stringutil.forcebytestr(e))
2322 2332 except urlerr.urlerror as e:
2323 2333 ui.warn(_('error fetching bundle: %s\n') %
2324 2334 stringutil.forcebytestr(e.reason))
2325 2335
2326 2336 return False
@@ -1,114 +1,166 b''
1 1 Test creating and consuming a stream bundle v2
2 2
3 3 $ getmainid() {
4 4 > hg -R main log --template '{node}\n' --rev "$1"
5 5 > }
6 6
7 7 $ cp $HGRCPATH $TESTTMP/hgrc.orig
8 8
9 9 $ cat >> $HGRCPATH << EOF
10 10 > [experimental]
11 11 > evolution.createmarkers=True
12 12 > evolution.exchange=True
13 13 > bundle2-output-capture=True
14 14 > [ui]
15 15 > ssh="$PYTHON" "$TESTDIR/dummyssh"
16 16 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
17 17 > [web]
18 18 > push_ssl = false
19 19 > allow_push = *
20 20 > [phases]
21 21 > publish=False
22 22 > [extensions]
23 23 > drawdag=$TESTDIR/drawdag.py
24 24 > clonebundles=
25 25 > EOF
26 26
27 27 The extension requires a repo (currently unused)
28 28
29 29 $ hg init main
30 30 $ cd main
31 31
32 32 $ hg debugdrawdag <<'EOF'
33 33 > E
34 34 > |
35 35 > D
36 36 > |
37 37 > C
38 38 > |
39 39 > B
40 40 > |
41 41 > A
42 42 > EOF
43 43
44 44 $ hg bundle -a --type="none-v2;stream=v2" bundle.hg
45 45 $ hg debugbundle bundle.hg
46 46 Stream params: {}
47 47 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Cstore}
48 48 $ hg debugbundle --spec bundle.hg
49 49 none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Cstore
50 50
51 51 Test that we can apply the bundle as a stream clone bundle
52 52
53 53 $ cat > .hg/clonebundles.manifest << EOF
54 54 > http://localhost:$HGPORT1/bundle.hg BUNDLESPEC=`hg debugbundle --spec bundle.hg`
55 55 > EOF
56 56
57 57 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
58 58 $ cat hg.pid >> $DAEMON_PIDS
59 59
60 60 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
61 61 $ cat http.pid >> $DAEMON_PIDS
62 62
63 63 $ cd ..
64 64 $ hg clone http://localhost:$HGPORT streamv2-clone-implicit --debug
65 65 using http://localhost:$HGPORT/
66 66 sending capabilities command
67 67 sending clonebundles command
68 68 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
69 69 bundle2-input-bundle: with-transaction
70 70 bundle2-input-part: "stream2" (params: 3 mandatory) supported
71 71 applying stream bundle
72 72 11 files to transfer, 1.65 KB of data
73 73 adding [s] data/A.i (66 bytes)
74 74 adding [s] data/B.i (66 bytes)
75 75 adding [s] data/C.i (66 bytes)
76 76 adding [s] data/D.i (66 bytes)
77 77 adding [s] data/E.i (66 bytes)
78 78 adding [s] 00manifest.i (584 bytes)
79 79 adding [s] 00changelog.i (595 bytes)
80 80 adding [s] phaseroots (43 bytes)
81 81 adding [c] branch2-served (94 bytes)
82 82 adding [c] rbc-names-v1 (7 bytes)
83 83 adding [c] rbc-revs-v1 (40 bytes)
84 84 transferred 1.65 KB in \d\.\d seconds \(.*/sec\) (re)
85 85 bundle2-input-part: total payload size 1840
86 86 bundle2-input-bundle: 0 parts total
87 87 finished applying clone bundle
88 88 query 1; heads
89 89 sending batch command
90 90 searching for changes
91 91 all remote heads known locally
92 92 no changes found
93 93 sending getbundle command
94 94 bundle2-input-bundle: with-transaction
95 95 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
96 96 bundle2-input-part: "phase-heads" supported
97 97 bundle2-input-part: total payload size 24
98 98 bundle2-input-bundle: 1 parts total
99 99 checking for updated bookmarks
100 100 updating to branch default
101 101 resolving manifests
102 102 branchmerge: False, force: False, partial: False
103 103 ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041
104 104 A: remote created -> g
105 105 getting A
106 106 B: remote created -> g
107 107 getting B
108 108 C: remote created -> g
109 109 getting C
110 110 D: remote created -> g
111 111 getting D
112 112 E: remote created -> g
113 113 getting E
114 114 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
115
116 $ hg clone --stream http://localhost:$HGPORT streamv2-clone-explicit --debug
117 using http://localhost:$HGPORT/
118 sending capabilities command
119 sending clonebundles command
120 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
121 bundle2-input-bundle: with-transaction
122 bundle2-input-part: "stream2" (params: 3 mandatory) supported
123 applying stream bundle
124 11 files to transfer, 1.65 KB of data
125 adding [s] data/A.i (66 bytes)
126 adding [s] data/B.i (66 bytes)
127 adding [s] data/C.i (66 bytes)
128 adding [s] data/D.i (66 bytes)
129 adding [s] data/E.i (66 bytes)
130 adding [s] 00manifest.i (584 bytes)
131 adding [s] 00changelog.i (595 bytes)
132 adding [s] phaseroots (43 bytes)
133 adding [c] branch2-served (94 bytes)
134 adding [c] rbc-names-v1 (7 bytes)
135 adding [c] rbc-revs-v1 (40 bytes)
136 transferred 1.65 KB in *.* seconds (*/sec) (glob)
137 bundle2-input-part: total payload size 1840
138 bundle2-input-bundle: 0 parts total
139 finished applying clone bundle
140 query 1; heads
141 sending batch command
142 searching for changes
143 all remote heads known locally
144 no changes found
145 sending getbundle command
146 bundle2-input-bundle: with-transaction
147 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
148 bundle2-input-part: "phase-heads" supported
149 bundle2-input-part: total payload size 24
150 bundle2-input-bundle: 1 parts total
151 checking for updated bookmarks
152 updating to branch default
153 resolving manifests
154 branchmerge: False, force: False, partial: False
155 ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041
156 A: remote created -> g
157 getting A
158 B: remote created -> g
159 getting B
160 C: remote created -> g
161 getting C
162 D: remote created -> g
163 getting D
164 E: remote created -> g
165 getting E
166 5 files updated, 0 files merged, 0 files removed, 0 files unresolved