# Mercurial changeset r35268:cb4dcd7f (branch: default), by Boris Feld
# "getbundle: add support for 'bookmarks' boolean argument"
# (diff context: -1,2175 +1,2188 — full file shown below)
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 nullid,
19 19 )
20 20 from . import (
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 lock as lockmod,
27 27 obsolete,
28 28 phases,
29 29 pushkey,
30 30 pycompat,
31 31 remotenames,
32 32 scmutil,
33 33 sslutil,
34 34 streamclone,
35 35 url as urlmod,
36 36 util,
37 37 )
38 38
39 39 urlerr = util.urlerr
40 40 urlreq = util.urlreq
41 41
# Maps bundle version human names (as written in bundle specifications,
# e.g. "gzip-v2") to internal changegroup version identifiers.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', # legacy alias for v2
                         }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
# (v1 bundles must stay readable by very old clients.)
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
51 51
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        """Split ``s`` into (version, params dict).

        Parameters are the optional URI-encoded ";"-delimited key=value
        pairs following the version string.
        """
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Fully-qualified spec: "<compression>-<version>[;params]".
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            # Bare version: pick the conventional compression for it.
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                  ', '.join(sorted(missingreqs)))

    if not externalnames:
        # Translate human names to internal identifiers.
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params
173 173
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte magic of ``fh`` and return a matching unbundler.

    ``fname`` is only used for error messages ("stream" when None);
    ``vfs``, when given, is used to expand ``fname`` to a full path.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            # headerless changegroup piped in: push the consumed bytes
            # back and treat it as an uncompressed HG10 bundle
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # bundle1: compression marker follows the header
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == 'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
201 201
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    Returns a bundlespec string such as ``gzip-v1`` or
    ``none-packed1;requirements=...``.

    The input file handle is seeked and the original seek position is not
    restored.

    Raises ``error.Abort`` when the compression or changegroup version
    cannot be mapped to a known bundlespec.
    """
    def speccompression(alg):
        # Map an internal compression engine name to its bundlespec
        # name; returns None when the engine is unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            # internal marker for a BZ stream whose header was consumed
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                # BUGFIX: report the actual unrecognized algorithm name;
                # ``comp`` is always falsy (None) on this path.
                raise error.Abort(_('unknown compression algorithm: %s')
                                  % b.params['Compression'])
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
254 254
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        # drop nodes the local changelog does not know about
        common = [n for n in common if cl.hasnode(n)]
    else:
        common = [nullid]
    heads = heads or cl.heads()
    return discovery.outgoing(repo, common, heads)
273 273
def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    # The goal of this config is to let developers pick the bundle
    # version used during exchange; especially handy in tests.
    # Value is a list of bundle versions to choose from; the highest
    # listed version should be used.
    #
    # developer config: devel.legacy.exchange
    versions = op.repo.ui.configlist('devel', 'legacy.exchange')
    if 'bundle1' in versions and 'bundle2' not in versions:
        return True
    return not op.remote.capable('bundle2')
289 289
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, list of (name, old node hex, new node hex)
        self.outbookmarks = []
        # transaction manager (for the bundle2 push-response)
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
407 407
# mapping of messages used when pushing a bookmark;
# each value is a (success message, failure message) pair keyed by action
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
416 416
417 417
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()

    ``opargs`` is a dict of extra keyword arguments forwarded to the
    pushoperation constructor (e.g. ``pushvars``).
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    # For a local destination we can check feature compatibility up front.
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    # hold any lock we managed to take for the duration of the push;
    # nullcontextmanager stands in for the ones we could not acquire
    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        # the bundle1 fallbacks below are no-ops for steps bundle2 completed
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop
482 482
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register

def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
512 512
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    # first figure out what the remote already has...
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    # ...then what we must send, limited to the requested revs
    outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                            onlyheads=pushop.revs,
                                            commoninc=commoninc,
                                            force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
525 525
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # future: phases to update if the changeset push succeeds
    # fallback: phases to update if it fails
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
577 577
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect obsolescence markers relevant to the pushed heads"""
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return
    if not pushop.repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
588 588
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and fill ``pushop.outbookmarks``

    Entries are (name, old remote node hex, new node hex); an empty old
    value means "add", an empty new value means "delete on remote".
    Sets ``pushop.bkresult`` to 2 when an explicitly requested bookmark
    exists on neither side.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # only move bookmarks that point into the pushed subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks the user explicitly asked to push (names expanded)
    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        # hex() chokes on None (deleted side of a comparison)
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    # bookmarks that simply advanced on our side
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
653 653
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets before pushing them

    Returns False when there is nothing to push; raises ``error.Abort``
    when a non-forced push would publish obsolete or unstable changesets
    or create unexpected remote heads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    # raises on new-head / race problems unless --force
    discovery.checkheads(pushop)
    return True
688 688
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
715 715
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            # legacy check: server compares the full remote head set
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            # finer check: only heads our push actually affects
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    # heads we obsolete plus heads we replace
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)
739 739
def _pushing(pushop):
    """return True if we are pushing anything"""
    if pushop.outgoing.missing:
        return True
    if pushop.outdatedphases:
        return True
    if pushop.outobsmarkers:
        return True
    return bool(pushop.outbookmarks)
746 746
@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'bookmarks' not in b2caps:
        return
    if not pushop.outbookmarks:
        return
    # (name, expected current remote node) pairs, binary-encoded
    data = [(book, bin(old)) for book, old, new in pushop.outbookmarks]
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('check:bookmarks', data=checkdata)
762 762
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'heads' not in b2caps.get('phases', ()):
        return
    if pushop.remotephases is None:
        return
    # check that the remote phase has not changed
    checks = [[] for p in phases.allphases]
    checks[phases.public].extend(pushop.remotephases.publicheads)
    checks[phases.draft].extend(pushop.remotephases.draftroots)
    if any(checks):
        for nodes in checks:
            nodes.sort()
        checkdata = phases.binaryencode(checks)
        bundler.newpart('check:phases', data=checkdata)
780 780
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the highest changegroup version both sides support
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        # closure over cgpart.id: locate the reply to *our* part
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
820 820
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    # developer config: devel.legacy.exchange forces the pushkey path
    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        # modern binary phase-heads part
        return _pushb2phaseheads(pushop, bundler)
    if 'pushkey' in b2caps:
        # legacy per-head pushkey parts
        return _pushb2phasespushkey(pushop, bundler)
837 837
def _pushb2phaseheads(pushop, bundler):
    """add a binary 'phase-heads' part turning outdated heads public"""
    pushop.stepsdone.add('phases')
    outdated = pushop.outdatedphases
    if not outdated:
        return
    # one bucket per phase; every head we push is advanced to public,
    # which is index 0 of phases.allphases
    updates = [[] for _phase in phases.allphases]
    updates[0].extend(ctx.node() for ctx in outdated)
    bundler.newpart('phase-heads', data=phases.binaryencode(updates))
846 846
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    # (part id, node) pairs, used to map a pushkey failure reply back to
    # the head whose phase update was rejected
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    # emit one pushkey part per outdated remote head, moving it from
    # draft to public
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """warn about every phase update the server ignored or refused"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
881 881
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part when the remote can decode one"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        # no marker format in common; leave the step to the legacy path
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        bundle2.buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
893 893
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """add parts carrying bookmark updates through bundle2

    Uses the binary 'bookmarks' part when the remote supports it and the
    'devel.legacy.exchange' config does not disable it; otherwise falls
    back to pushkey parts.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    remotecaps = bundle2.bundle2caps(pushop.remote)
    configured = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    forcelegacy = 'bookmarks' in configured

    if not forcelegacy and 'bookmarks' in remotecaps:
        return _pushb2bookmarkspart(pushop, bundler)
    if 'pushkey' in remotecaps:
        return _pushb2bookmarkspushkey(pushop, bundler)
908 908
909 909 def _bmaction(old, new):
910 910 """small utility for bookmark pushing"""
911 911 if not old:
912 912 return 'export'
913 913 elif not new:
914 914 return 'delete'
915 915 return 'update'
916 916
def _pushb2bookmarkspart(pushop, bundler):
    """push bookmark updates through a bundle2 binary 'bookmarks' part"""
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    # `data` is the (name, target node) payload for the binary part;
    # `allactions` remembers which status message applies to each bookmark
    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply
938 938
def _pushb2bookmarkspushkey(pushop, bundler):
    """push bookmark updates through bundle2, one pushkey part per bookmark"""
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples; used by the failure and
    # reply handlers below to report what happened to each bookmark
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """report per-bookmark outcomes from the server reply"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
983 983
@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if not pushvars:
        return
    # parse each KEY=VALUE entry, aborting on anything malformed
    shellvars = {}
    for raw in pushvars:
        if '=' not in raw:
            msg = ("unable to parse variable '%s', should follow "
                   "'KEY=VALUE' or 'KEY=' format")
            raise error.Abort(msg % raw)
        name, value = raw.split('=', 1)
        shellvars[name] = value

    part = bundler.newpart('pushvars')

    for key, value in shellvars.iteritems():
        part.addparam(key, value, mandatory=False)
1002 1002
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # let every registered part generator contribute parts; a generator may
    # return a callable that will process the server's reply for its part
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # dispatch the pushkey failure to whichever part generator
        # registered a callback for that part (phases or bookmarks)
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1051 1051
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) path; a no-op when the 'changesets' step was
    already handled through bundle2.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())
1091 1091
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Applies the remote's phase state locally, then (when the push moved
    changesets) advances outdated remote heads from draft to public via
    pushkey, unless bundle2 already handled the 'phases' step.
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1147 1147
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        tr = pushop.trmanager.transaction()
        phases.advanceboundary(pushop.repo, tr, phase, nodes)
        return
    # Without a transaction manager the repo is not locked and no phase
    # may be changed; just tell the user which update was skipped, if any.
    wouldmove = [n for n in nodes if phase < pushop.repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if wouldmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
1164 1164
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote

    Legacy pushkey path; a no-op when bundle2 already sent the markers.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    results = []
    # reverse sort to ensure we end with dump0
    for key in sorted(remotedata, reverse=True):
        results.append(remote.pushkey('obsolete', key, '', remotedata[key]))
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
1183 1183
def _pushbookmark(pushop):
    """Update bookmark position on remote

    Legacy pushkey path; skipped when the push failed outright or when
    bundle2 already handled the 'bookmarks' step.
    """
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        action = _bmaction(old, new)
        if remote.pushkey('bookmarks', book, old, new):
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1205 1205
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # True unless something (config or server) forces bundle1
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1276 1276
class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        # operation source (e.g. 'pull'), recorded in the hook arguments
        self.source = source
        # remote url, recorded in the hook arguments and transaction name
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            # password is stripped from the url used as transaction name
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1306 1306
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)

    # refuse to pull from a repo whose requirements we do not support
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        # each step below is a no-op when already handled through bundle2
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    # storing remotenames
    if repo.ui.configbool('experimental', 'remotenames'):
        remotenames.pullremotenames(repo, remote)

    return pullop
1368 1368
# list of names of steps to perform discovery before pull, in order
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1376 1376
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def dec(func):
        # registration happens at import/definition time
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec
1392 1392
def _pulldiscovery(pullop):
    """Run all registered discovery steps, in registration order"""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1398 1398
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in the bundle1 case

    If not using bundle2, bookmarks have to be fetched before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return  # already known (e.g. supplied by the caller)
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice
        # with new implementations: bookmark data will travel in the
        # bundle2 reply instead.
        return
    raw = pullop.remote.listkeys('bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(raw)
1413 1413
1414 1414
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situation. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not reachable from a remote head, we'll not be able
        # to detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1447 1447
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = False

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch

    ui = pullop.repo.ui
    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
    if (not legacyphase and hasbinaryphase):
        # request phase data through the binary 'phase-heads' part
        kwargs['phases'] = True
        pullop.stepsdone.add('phases')

    if 'listkeys' in pullop.remotebundle2caps:
        if 'phases' not in pullop.stepsdone:
            # fall back to phase data over listkeys
            kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # let extensions adjust the getbundle arguments before the request
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1528 1528
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call

    ``kwargs`` is the argument dict that will be passed to the remote's
    ``getbundle``; extensions wrapping this function may mutate it in place.
    """
1531 1531
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Legacy (non-bundle2) path; a no-op when the 'changegroup' step was
    already handled through bundle2.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the most capable protocol the remote supports
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1566 1566
def _pullphase(pullop):
    """fetch the remote's phase pushkey namespace and apply it locally

    A no-op when bundle2 already handled the 'phases' step.
    """
    if 'phases' in pullop.stepsdone:
        return
    _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1573 1573
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the remote's 'phases' pushkey namespace content.
    """
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1608 1608
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one

    A no-op when the 'bookmarks' step was already performed.
    """
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    bookmod.updatefromremote(repo.ui, repo, pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1620 1620
def _pullobsolete(pullop):
    """pull obsolescence markers from the remote, when enabled

    `pullop.gettransaction` is a function returning the pull transaction,
    creating one if necessary.  We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes.
    """
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # 'dump0' presence signals the remote actually has markers to send.
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if not key.startswith('dump'):
                    continue
                data = util.b85decode(remoteobs[key])
                version, newmarks = obsolete._readmarkers(data)
                markers.extend(newmarks)
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr
1648 1648
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # Advertise bundle2 support plus the repo's URL-quoted capability blob.
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return {'HG20', 'bundle2=' + urlreq.quote(capsblob)}
1655 1655
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function.
#
# This exists to help extensions wrap steps if necessary.
getbundle2partsmapping = {}
1663 1663
def getbundle2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator for getbundle

    The decorated function is recorded in the step -> function mapping and
    its step name appended to the list of steps (or inserted at ``idx``).
    Beware that decorated functions will be added in definition order
    (this may matter).

    Only use this decorator for new steps; to wrap a step from an
    extension, attack the getbundle2partsmapping dictionary directly."""
    def register(func):
        # each step name may only be registered once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return register
1682 1682
def bundle2requested(bundlecaps):
    """Return True when the client advertised any HG2x bundle capability."""
    if bundlecaps is None:
        return False
    return any(cap.startswith('HG2') for cap in bundlecaps)
1687 1687
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    if not bundle2requested(bundlecaps):
        # bundle10 (legacy) case: only a bare changegroup may be produced,
        # so any extra getbundle argument is an error.
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        return changegroup.makestream(repo, outgoing, '01', source,
                                      bundlecaps=bundlecaps)

    # bundle20 case: decode the client's URL-quoted bundle2 capability blob.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # Each registered step contributes its part(s), in registration order.
    for name in getbundle2partsorder:
        partfn = getbundle2partsmapping[name]
        partfn(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
               **pycompat.strkwargs(kwargs))

    return bundler.getchunks()
1728 1728
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get('cg', True):
        return
    # Pick the newest changegroup version supported by both ends.
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        # nothing to send, no changegroup part
        return
    cgstream = changegroup.makestream(repo, outgoing, version, source,
                                      bundlecaps=bundlecaps)
    part = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        part.addparam('version', version)
    part.addparam('nbchanges', '%d' % len(outgoing.missing),
                  mandatory=False)
    if 'treemanifest' in repo.requirements:
        part.addparam('treemanifest', '1')
1757 1757
@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get('bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    # Binary-encode the full bookmark set; skip the part entirely when
    # the repository has no bookmarks.
    payload = bookmod.binaryencode(bookmod.listbinbookmarks(repo))
    if payload:
        bundler.newpart('bookmarks', data=payload)
1770
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # One 'listkeys' part per requested pushkey namespace.
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
1768 1781
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # Only markers relevant to the ancestors of the requested heads.
    subset = [c.node() for c in repo.set('::%ln', heads)]
    markers = sorted(repo.obsstore.relevantmarkers(subset))
    bundle2.buildobsmarkerspart(bundler, markers)
1780 1793
@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle

    Raises ValueError when the client requested phase data but does not
    support the binary 'heads' phases exchange method.
    """
    if kwargs.get('phases', False):
        # Fix: b2caps.get('phases') is None when the client does not
        # advertise the 'phases' capability at all, and `'heads' in None`
        # raised TypeError instead of the intended ValueError.  Default to
        # an empty tuple so the error path is uniform.
        if 'heads' not in b2caps.get('phases', ()):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            # every exchanged changeset is public on a publishing server
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move
            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phase for now
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)
1827 1840
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless changesets are being exchanged...
    if not kwargs.get('cg', True):
        return
    # ...and the client supports the part.
    if 'hgtagsfnodes' not in b2caps:
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1847 1860
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    currentheads = repo.heads()
    headshash = hashlib.sha1(''.join(sorted(currentheads))).digest()
    # Accept a forced push, an exact head match, or a matching head hash.
    if (their_heads == ['force']
            or their_heads == currentheads
            or their_heads == ['hashed', headshash]):
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
1861 1874
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    # lazily take the locks and open the transaction the
                    # first time a bundle part actually needs to write
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # capture server output so it can be relayed to the
                        # client inside the reply bundle
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # flag the exception so upper layers know the failure
                # happened while applying a bundle2 stream
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1934 1947
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    # feature can be disabled client-side
    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # specific revisions were requested; not a full clone
    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    # try only the most-preferred entry
    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
2000 2013
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # unparseable specs are kept verbatim, just not expanded
                    pass

        entries.append(attrs)

    return entries
2036 2049
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    usable = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if not spec:
            # If we don't have a spec and requested a stream clone, we don't
            # know what the entry is so don't attempt to apply it.
            if streamclonerequested:
                repo.ui.debug('filtering %s because cannot determine if a '
                              'stream clone bundle\n' % entry['URL'])
                continue
        else:
            try:
                comp, version, params = parsebundlespec(repo, spec,
                                                        strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

            # If a stream clone was requested, filter out non-streamclone
            # entries.
            if streamclonerequested and (comp != 'UN' or version != 's1'):
                repo.ui.debug('filtering %s because not a stream clone\n' %
                              entry['URL'])
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        usable.append(entry)

    return usable
2083 2096
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        # Walk the preferences in priority order; the first attribute that
        # distinguishes the two entries decides the ordering.
        for prefkey, prefvalue in self.prefers:
            ours = self.value.get(prefkey)
            theirs = other.value.get(prefkey)

            # Special case for other entry missing the attribute while ours
            # matches exactly.
            if ours is not None and theirs is None and ours == prefvalue:
                return -1

            # Special case for our entry missing the attribute while theirs
            # matches exactly.
            if theirs is not None and ours is None and theirs == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if ours is None or theirs is None:
                continue

            # Same values should fall back to next attribute.
            if ours == theirs:
                continue

            # Exact matches come first.
            if ours == prefvalue:
                return -1
            if theirs == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0
2147 2160
def sortclonebundleentries(ui, entries):
    """Sort manifest entries by the user's ``ui.clonebundleprefers`` list."""
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        # no preferences: keep manifest order
        return list(entries)

    # Each preference is "ATTR=VALUE"; split once into (attr, value) pairs.
    preferpairs = [p.split('=', 1) for p in prefers]

    wrapped = [clonebundleentry(e, preferpairs) for e in entries]
    wrapped.sort()
    return [w.value for w in wrapped]
2157 2170
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True when the bundle was fetched and applied, False when the
    fetch failed with an HTTP or URL error (a warning is emitted in that
    case).
    """
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            # stream clone bundles bypass the normal bundle2 machinery
            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

        return False
@@ -1,923 +1,925 b''
1 1 The Mercurial wire protocol is a request-response based protocol
2 2 with multiple wire representations.
3 3
4 4 Each request is modeled as a command name, a dictionary of arguments, and
5 5 optional raw input. Command arguments and their types are intrinsic
6 6 properties of commands. So is the response type of the command. This means
7 7 clients can't always send arbitrary arguments to servers and servers can't
8 8 return multiple response types.
9 9
10 10 The protocol is synchronous and does not support multiplexing (concurrent
11 11 commands).
12 12
13 13 Transport Protocols
14 14 ===================
15 15
16 16 HTTP Transport
17 17 --------------
18 18
19 19 Commands are issued as HTTP/1.0 or HTTP/1.1 requests. Commands are
20 20 sent to the base URL of the repository with the command name sent in
21 21 the ``cmd`` query string parameter. e.g.
22 22 ``https://example.com/repo?cmd=capabilities``. The HTTP method is ``GET``
23 23 or ``POST`` depending on the command and whether there is a request
24 24 body.
25 25
26 26 Command arguments can be sent multiple ways.
27 27
28 28 The simplest is part of the URL query string using ``x-www-form-urlencoded``
29 29 encoding (see Python's ``urllib.urlencode()``). However, many servers impose
30 30 length limitations on the URL. So this mechanism is typically only used if
31 31 the server doesn't support other mechanisms.
32 32
33 33 If the server supports the ``httpheader`` capability, command arguments can
34 34 be sent in HTTP request headers named ``X-HgArg-<N>`` where ``<N>`` is an
35 35 integer starting at 1. A ``x-www-form-urlencoded`` representation of the
36 36 arguments is obtained. This full string is then split into chunks and sent
37 37 in numbered ``X-HgArg-<N>`` headers. The maximum length of each HTTP header
38 38 is defined by the server in the ``httpheader`` capability value, which defaults
39 39 to ``1024``. The server reassembles the encoded arguments string by
40 40 concatenating the ``X-HgArg-<N>`` headers then URL decodes them into a
41 41 dictionary.
42 42
43 43 The list of ``X-HgArg-<N>`` headers should be added to the ``Vary`` request
44 44 header to instruct caches to take these headers into consideration when caching
45 45 requests.
46 46
47 47 If the server supports the ``httppostargs`` capability, the client
48 48 may send command arguments in the HTTP request body as part of an
49 49 HTTP POST request. The command arguments will be URL encoded just like
50 50 they would for sending them via HTTP headers. However, no splitting is
51 51 performed: the raw arguments are included in the HTTP request body.
52 52
53 53 The client sends a ``X-HgArgs-Post`` header with the string length of the
54 54 encoded arguments data. Additional data may be included in the HTTP
55 55 request body immediately following the argument data. The offset of the
56 56 non-argument data is defined by the ``X-HgArgs-Post`` header. The
57 57 ``X-HgArgs-Post`` header is not required if there is no argument data.
58 58
59 59 Additional command data can be sent as part of the HTTP request body. The
60 60 default ``Content-Type`` when sending data is ``application/mercurial-0.1``.
61 61 A ``Content-Length`` header is currently always sent.
62 62
63 63 Example HTTP requests::
64 64
65 65 GET /repo?cmd=capabilities
66 66 X-HgArg-1: foo=bar&baz=hello%20world
67 67
68 68 The request media type should be chosen based on server support. If the
69 69 ``httpmediatype`` server capability is present, the client should send
70 70 the newest mutually supported media type. If this capability is absent,
71 71 the client must assume the server only supports the
72 72 ``application/mercurial-0.1`` media type.
73 73
74 74 The ``Content-Type`` HTTP response header identifies the response as coming
75 75 from Mercurial and can also be used to signal an error has occurred.
76 76
77 77 The ``application/mercurial-*`` media types indicate a generic Mercurial
78 78 data type.
79 79
80 80 The ``application/mercurial-0.1`` media type is raw Mercurial data. It is the
81 81 predecessor of the format below.
82 82
83 83 The ``application/mercurial-0.2`` media type is compression framed Mercurial
84 84 data. The first byte of the payload indicates the length of the compression
85 85 format identifier that follows. Next are N bytes indicating the compression
86 86 format. e.g. ``zlib``. The remaining bytes are compressed according to that
87 87 compression format. The decompressed data behaves the same as with
88 88 ``application/mercurial-0.1``.
89 89
90 90 The ``application/hg-error`` media type indicates a generic error occurred.
91 91 The content of the HTTP response body typically holds text describing the
92 92 error.
93 93
94 94 The ``application/hg-changegroup`` media type indicates a changegroup response
95 95 type.
96 96
97 97 Clients also accept the ``text/plain`` media type. All other media
98 98 types should cause the client to error.
99 99
100 100 Behavior of media types is further described in the ``Content Negotiation``
101 101 section below.
102 102
103 103 Clients should issue a ``User-Agent`` request header that identifies the client.
104 104 The server should not use the ``User-Agent`` for feature detection.
105 105
106 106 A command returning a ``string`` response issues a
107 107 ``application/mercurial-0.*`` media type and the HTTP response body contains
108 108 the raw string value (after compression decoding, if used). A
109 109 ``Content-Length`` header is typically issued, but not required.
110 110
111 111 A command returning a ``stream`` response issues a
112 112 ``application/mercurial-0.*`` media type and the HTTP response is typically
113 113 using *chunked transfer* (``Transfer-Encoding: chunked``).
114 114
115 115 SSH Transport
116 116 =============
117 117
118 118 The SSH transport is a custom text-based protocol suitable for use over any
119 119 bi-directional stream transport. It is most commonly used with SSH.
120 120
121 121 A SSH transport server can be started with ``hg serve --stdio``. The stdin,
122 122 stderr, and stdout file descriptors of the started process are used to exchange
123 123 data. When Mercurial connects to a remote server over SSH, it actually starts
124 124 a ``hg serve --stdio`` process on the remote server.
125 125
126 126 Commands are issued by sending the command name followed by a trailing newline
127 127 ``\n`` to the server. e.g. ``capabilities\n``.
128 128
129 129 Command arguments are sent in the following format::
130 130
131 131 <argument> <length>\n<value>
132 132
133 133 That is, the argument string name followed by a space followed by the
134 134 integer length of the value (expressed as a string) followed by a newline
135 135 (``\n``) followed by the raw argument value.
136 136
137 137 Dictionary arguments are encoded differently::
138 138
139 139 <argument> <# elements>\n
140 140 <key1> <length1>\n<value1>
141 141 <key2> <length2>\n<value2>
142 142 ...
143 143
144 144 Non-argument data is sent immediately after the final argument value. It is
145 145 encoded in chunks::
146 146
147 147 <length>\n<data>
148 148
149 149 Each command declares a list of supported arguments and their types. If a
150 150 client sends an unknown argument to the server, the server should abort
151 151 immediately. The special argument ``*`` in a command's definition indicates
152 152 that all argument names are allowed.
153 153
154 154 The definition of supported arguments and types is initially made when a
155 155 new command is implemented. The client and server must initially independently
156 156 agree on the arguments and their types. This initial set of arguments can be
157 157 supplemented through the presence of *capabilities* advertised by the server.
158 158
159 159 Each command has a defined expected response type.
160 160
161 161 A ``string`` response type is a length framed value. The response consists of
162 162 the string encoded integer length of a value followed by a newline (``\n``)
163 163 followed by the value. Empty values are allowed (and are represented as
164 164 ``0\n``).
165 165
166 166 A ``stream`` response type consists of raw bytes of data. There is no framing.
167 167
168 168 A generic error response type is also supported. It consists of an error
169 169 message written to ``stderr`` followed by ``\n-\n``. In addition, ``\n`` is
170 170 written to ``stdout``.
171 171
172 172 If the server receives an unknown command, it will send an empty ``string``
173 173 response.
174 174
175 175 The server terminates if it receives an empty command (a ``\n`` character).
176 176
177 177 Capabilities
178 178 ============
179 179
180 180 Servers advertise supported wire protocol features. This allows clients to
181 181 probe for server features before blindly calling a command or passing a
182 182 specific argument.
183 183
184 184 The server's features are exposed via a *capabilities* string. This is a
185 185 space-delimited string of tokens/features. Some features are single words
186 186 like ``lookup`` or ``batch``. Others are complicated key-value pairs
187 187 advertising sub-features. e.g. ``httpheader=2048``. When complex, non-word
188 188 values are used, each feature name can define its own encoding of sub-values.
189 189 Comma-delimited and ``x-www-form-urlencoded`` values are common.
190 190
191 191 The following sections document capabilities defined by the canonical Mercurial server
192 192 implementation.
193 193
194 194 batch
195 195 -----
196 196
197 197 Whether the server supports the ``batch`` command.
198 198
199 199 This capability/command was introduced in Mercurial 1.9 (released July 2011).
200 200
201 201 branchmap
202 202 ---------
203 203
204 204 Whether the server supports the ``branchmap`` command.
205 205
206 206 This capability/command was introduced in Mercurial 1.3 (released July 2009).
207 207
208 208 bundle2-exp
209 209 -----------
210 210
211 211 Precursor to ``bundle2`` capability that was used before bundle2 was a
212 212 stable feature.
213 213
214 214 This capability was introduced in Mercurial 3.0 behind an experimental
215 215 flag. This capability should not be observed in the wild.
216 216
217 217 bundle2
218 218 -------
219 219
220 220 Indicates whether the server supports the ``bundle2`` data exchange format.
221 221
222 222 The value of the capability is a URL quoted, newline (``\n``) delimited
223 223 list of keys or key-value pairs.
224 224
225 225 A key is simply a URL encoded string.
226 226
227 227 A key-value pair is a URL encoded key separated from a URL encoded value by
228 228 an ``=``. If the value is a list, elements are delimited by a ``,`` after
229 229 URL encoding.
230 230
231 231 For example, say we have the values::
232 232
233 233 {'HG20': [], 'changegroup': ['01', '02'], 'digests': ['sha1', 'sha512']}
234 234
235 235 We would first construct a string::
236 236
237 237 HG20\nchangegroup=01,02\ndigests=sha1,sha512
238 238
239 239 We would then URL quote this string::
240 240
241 241 HG20%0Achangegroup%3D01%2C02%0Adigests%3Dsha1%2Csha512
242 242
243 243 This capability was introduced in Mercurial 3.4 (released May 2015).
244 244
245 245 changegroupsubset
246 246 -----------------
247 247
248 248 Whether the server supports the ``changegroupsubset`` command.
249 249
250 250 This capability was introduced in Mercurial 0.9.2 (released December
251 251 2006).
252 252
253 253 This capability was introduced at the same time as the ``lookup``
254 254 capability/command.
255 255
256 256 compression
257 257 -----------
258 258
259 259 Declares support for negotiating compression formats.
260 260
261 261 Presence of this capability indicates the server supports dynamic selection
262 262 of compression formats based on the client request.
263 263
264 264 Servers advertising this capability are required to support the
265 265 ``application/mercurial-0.2`` media type in response to commands returning
266 266 streams. Servers may support this media type on any command.
267 267
268 268 The value of the capability is a comma-delimited list of strings declaring
269 269 supported compression formats. The order of the compression formats is in
270 270 server-preferred order, most preferred first.
271 271
272 272 The identifiers used by the official Mercurial distribution are:
273 273
274 274 bzip2
275 275 bzip2
276 276 none
277 277 uncompressed / raw data
278 278 zlib
279 279 zlib (no gzip header)
280 280 zstd
281 281 zstd
282 282
283 283 This capability was introduced in Mercurial 4.1 (released February 2017).
284 284
285 285 getbundle
286 286 ---------
287 287
288 288 Whether the server supports the ``getbundle`` command.
289 289
290 290 This capability was introduced in Mercurial 1.9 (released July 2011).
291 291
292 292 httpheader
293 293 ----------
294 294
295 295 Whether the server supports receiving command arguments via HTTP request
296 296 headers.
297 297
298 298 The value of the capability is an integer describing the max header
299 299 length that clients should send. Clients should ignore any content after a
300 300 comma in the value, as this is reserved for future use.
301 301
302 302 This capability was introduced in Mercurial 1.9 (released July 2011).
303 303
304 304 httpmediatype
305 305 -------------
306 306
307 307 Indicates which HTTP media types (``Content-Type`` header) the server is
308 308 capable of receiving and sending.
309 309
310 310 The value of the capability is a comma-delimited list of strings identifying
311 311 support for media type and transmission direction. The following strings may
312 312 be present:
313 313
314 314 0.1rx
315 315 Indicates server support for receiving ``application/mercurial-0.1`` media
316 316 types.
317 317
318 318 0.1tx
319 319 Indicates server support for sending ``application/mercurial-0.1`` media
320 320 types.
321 321
322 322 0.2rx
323 323 Indicates server support for receiving ``application/mercurial-0.2`` media
324 324 types.
325 325
326 326 0.2tx
327 327 Indicates server support for sending ``application/mercurial-0.2`` media
328 328 types.
329 329
330 330 minrx=X
331 331 Minimum media type version the server is capable of receiving. Value is a
332 332 string like ``0.2``.
333 333
334 334 This capability can be used by servers to limit connections from legacy
335 335 clients not using the latest supported media type. However, only clients
336 336 with knowledge of this capability will know to consult this value. This
337 337 capability is present so the client may issue a more user-friendly error
338 338 when the server has locked out a legacy client.
339 339
340 340 mintx=X
341 341 Minimum media type version the server is capable of sending. Value is a
342 342 string like ``0.1``.
343 343
344 344 Servers advertising support for the ``application/mercurial-0.2`` media type
345 345 should also advertise the ``compression`` capability.
346 346
347 347 This capability was introduced in Mercurial 4.1 (released February 2017).
348 348
349 349 httppostargs
350 350 ------------
351 351
352 352 **Experimental**
353 353
354 354 Indicates that the server supports and prefers clients send command arguments
355 355 via a HTTP POST request as part of the request body.
356 356
357 357 This capability was introduced in Mercurial 3.8 (released May 2016).
358 358
359 359 known
360 360 -----
361 361
362 362 Whether the server supports the ``known`` command.
363 363
364 364 This capability/command was introduced in Mercurial 1.9 (released July 2011).
365 365
366 366 lookup
367 367 ------
368 368
369 369 Whether the server supports the ``lookup`` command.
370 370
371 371 This capability was introduced in Mercurial 0.9.2 (released December
372 372 2006).
373 373
374 374 This capability was introduced at the same time as the ``changegroupsubset``
375 375 capability/command.
376 376
377 377 pushkey
378 378 -------
379 379
380 380 Whether the server supports the ``pushkey`` and ``listkeys`` commands.
381 381
382 382 This capability was introduced in Mercurial 1.6 (released July 2010).
383 383
384 384 standardbundle
385 385 --------------
386 386
387 387 **Unsupported**
388 388
389 389 This capability was introduced during the Mercurial 0.9.2 development cycle in
390 390 2006. It was never present in a release, as it was replaced by the ``unbundle``
391 391 capability. This capability should not be encountered in the wild.
392 392
393 393 stream-preferred
394 394 ----------------
395 395
396 396 If present the server prefers that clients clone using the streaming clone
397 397 protocol (``hg clone --stream``) rather than the standard
398 398 changegroup/bundle based protocol.
399 399
400 400 This capability was introduced in Mercurial 2.2 (released May 2012).
401 401
402 402 streamreqs
403 403 ----------
404 404
405 405 Indicates whether the server supports *streaming clones* and the *requirements*
406 406 that clients must support to receive it.
407 407
408 408 If present, the server supports the ``stream_out`` command, which transmits
409 409 raw revlogs from the repository instead of changegroups. This provides a faster
410 410 cloning mechanism at the expense of more bandwidth used.
411 411
412 412 The value of this capability is a comma-delimited list of repo format
413 413 *requirements*. These are requirements that impact the reading of data in
414 414 the ``.hg/store`` directory. An example value is
415 415 ``streamreqs=generaldelta,revlogv1`` indicating the server repo requires
416 416 the ``revlogv1`` and ``generaldelta`` requirements.
417 417
418 418 If the only format requirement is ``revlogv1``, the server may expose the
419 419 ``stream`` capability instead of the ``streamreqs`` capability.
420 420
421 421 This capability was introduced in Mercurial 1.7 (released November 2010).
422 422
423 423 stream
424 424 ------
425 425
426 426 Whether the server supports *streaming clones* from ``revlogv1`` repos.
427 427
428 428 If present, the server supports the ``stream_out`` command, which transmits
429 429 raw revlogs from the repository instead of changegroups. This provides a faster
430 430 cloning mechanism at the expense of more bandwidth used.
431 431
432 432 This capability was introduced in Mercurial 0.9.1 (released July 2006).
433 433
434 434 When initially introduced, the value of the capability was the numeric
435 435 revlog revision. e.g. ``stream=1``. This indicates the changegroup is using
436 436 ``revlogv1``. This simple integer value wasn't powerful enough, so the
437 437 ``streamreqs`` capability was invented to handle cases where the repo
438 438 requirements have more than just ``revlogv1``. Newer servers omit the
439 439 ``=1`` since it was the only value supported and the value of ``1`` can
440 440 be implied by clients.
441 441
442 442 unbundlehash
443 443 ------------
444 444
445 445 Whether the ``unbundle`` commands supports receiving a hash of all the
446 446 heads instead of a list.
447 447
448 448 For more, see the documentation for the ``unbundle`` command.
449 449
450 450 This capability was introduced in Mercurial 1.9 (released July 2011).
451 451
452 452 unbundle
453 453 --------
454 454
455 455 Whether the server supports pushing via the ``unbundle`` command.
456 456
457 457 This capability/command has been present since Mercurial 0.9.1 (released
458 458 July 2006).
459 459
460 460 Mercurial 0.9.2 (released December 2006) added values to the capability
461 461 indicating which bundle types the server supports receiving. This value is a
462 462 comma-delimited list. e.g. ``HG10GZ,HG10BZ,HG10UN``. The order of values
463 463 reflects the priority/preference of that type, where the first value is the
464 464 most preferred type.
465 465
466 466 Handshake Protocol
467 467 ==================
468 468
469 469 While not explicitly required, it is common for clients to perform a
470 470 *handshake* when connecting to a server. The handshake accomplishes 2 things:
471 471
472 472 * Obtaining capabilities and other server features
473 473 * Flushing extra server output (e.g. SSH servers may print extra text
474 474 when connecting that may confuse the wire protocol)
475 475
476 476 This isn't a traditional *handshake* as far as network protocols go because
477 477 there is no persistent state as a result of the handshake: the handshake is
478 478 simply the issuing of commands and commands are stateless.
479 479
480 480 The canonical clients perform a capabilities lookup at connection establishment
481 481 time. This is because clients must assume a server only supports the features
482 482 of the original Mercurial server implementation until proven otherwise (from
483 483 advertised capabilities). Nearly every server running today supports features
484 484 that weren't present in the original Mercurial server implementation. Rather
485 485 than wait for a client to perform functionality that needs to consult
486 486 capabilities, it issues the lookup at connection start to avoid any delay later.
487 487
488 488 For HTTP servers, the client sends a ``capabilities`` command request as
489 489 soon as the connection is established. The server responds with a capabilities
490 490 string, which the client parses.
491 491
492 492 For SSH servers, the client sends the ``hello`` command (no arguments)
493 493 and a ``between`` command with the ``pairs`` argument having the value
494 494 ``0000000000000000000000000000000000000000-0000000000000000000000000000000000000000``.
495 495
496 496 The ``between`` command has been supported since the original Mercurial
497 497 server. Requesting the empty range will return a ``\n`` string response,
498 498 which will be encoded as ``1\n\n`` (value length of ``1`` followed by a newline
499 499 followed by the value, which happens to be a newline).
500 500
501 501 The ``hello`` command was later introduced. Servers supporting it will issue
502 502 a response to that command before sending the ``1\n\n`` response to the
503 503 ``between`` command. Servers not supporting ``hello`` will send an empty
504 504 response (``0\n``).
505 505
506 506 In addition to the expected output from the ``hello`` and ``between`` commands,
507 507 servers may also send other output, such as *message of the day (MOTD)*
508 508 announcements. Clients assume servers will send this output before the
509 509 Mercurial server replies to the client-issued commands. So any server output
510 510 not conforming to the expected command responses is assumed to be not related
511 511 to Mercurial and can be ignored.
512 512
513 513 Content Negotiation
514 514 ===================
515 515
516 516 The wire protocol has some mechanisms to help peers determine what content
517 517 types and encoding the other side will accept. Historically, these mechanisms
518 518 have been built into commands themselves because most commands only send a
519 519 well-defined response type and only certain commands needed to support
520 520 functionality like compression.
521 521
522 522 Currently, only the HTTP transport supports content negotiation at the protocol
523 523 layer.
524 524
525 525 HTTP requests advertise supported response formats via the ``X-HgProto-<N>``
526 526 request header, where ``<N>`` is an integer starting at 1 allowing the logical
527 527 value to span multiple headers. This value consists of a list of
528 528 space-delimited parameters. Each parameter denotes a feature or capability.
529 529
530 530 The following parameters are defined:
531 531
532 532 0.1
533 533 Indicates the client supports receiving ``application/mercurial-0.1``
534 534 responses.
535 535
536 536 0.2
537 537 Indicates the client supports receiving ``application/mercurial-0.2``
538 538 responses.
539 539
540 540 comp
541 541 Indicates compression formats the client can decode. Value is a list of
542 542 comma delimited strings identifying compression formats ordered from
543 543 most preferential to least preferential. e.g. ``comp=zstd,zlib,none``.
544 544
545 545 This parameter does not have an effect if only the ``0.1`` parameter
546 546 is defined, as support for ``application/mercurial-0.2`` or greater is
547 547 required to use arbitrary compression formats.
548 548
549 549 If this parameter is not advertised, the server interprets this as
550 550 equivalent to ``zlib,none``.
551 551
552 552 Clients may choose to only send this header if the ``httpmediatype``
553 553 server capability is present, as currently all server-side features
554 554 consulting this header require the client to opt in to new protocol features
555 555 advertised via the ``httpmediatype`` capability.
556 556
557 557 A server that doesn't receive an ``X-HgProto-<N>`` header should infer a
558 558 value of ``0.1``. This is compatible with legacy clients.
559 559
560 560 A server receiving a request indicating support for multiple media type
561 561 versions may respond with any of the supported media types. Not all servers
562 562 may support all media types on all commands.
563 563
564 564 Commands
565 565 ========
566 566
567 567 This section contains a list of all wire protocol commands implemented by
568 568 the canonical Mercurial server.
569 569
570 570 batch
571 571 -----
572 572
573 573 Issue multiple commands while sending a single command request. The purpose
574 574 of this command is to allow a client to issue multiple commands while avoiding
575 575 multiple round trips to the server, thereby enabling commands to complete
576 576 quicker.
577 577
578 578 The command accepts a ``cmds`` argument that contains a list of commands to
579 579 execute.
580 580
581 581 The value of ``cmds`` is a ``;`` delimited list of strings. Each string has the
582 582 form ``<command> <arguments>``. That is, the command name followed by a space
583 583 followed by an argument string.
584 584
585 585 The argument string is a ``,`` delimited list of ``<key>=<value>`` values
586 586 corresponding to command arguments. Both the argument name and value are
587 587 escaped using a special substitution map::
588 588
589 589 : -> :c
590 590 , -> :o
591 591 ; -> :s
592 592 = -> :e
593 593
594 594 The response type for this command is ``string``. The value contains a
595 595 ``;`` delimited list of responses for each requested command. Each value
596 596 in this list is escaped using the same substitution map used for arguments.
597 597
598 598 If an error occurs, the generic error response may be sent.
599 599
600 600 between
601 601 -------
602 602
603 603 (Legacy command used for discovery in old clients)
604 604
605 605 Obtain nodes between pairs of nodes.
606 606
607 607 The ``pairs`` arguments contains a space-delimited list of ``-`` delimited
608 608 hex node pairs. e.g.::
609 609
610 610 a072279d3f7fd3a4aa7ffa1a5af8efc573e1c896-6dc58916e7c070f678682bfe404d2e2d68291a18
611 611
612 612 Return type is a ``string``. Value consists of lines corresponding to each
613 613 requested range. Each line contains a space-delimited list of hex nodes.
614 614 A newline ``\n`` terminates each line, including the last one.
615 615
616 616 branchmap
617 617 ---------
618 618
619 619 Obtain heads in named branches.
620 620
621 621 Accepts no arguments. Return type is a ``string``.
622 622
623 623 Return value contains lines with URL encoded branch names followed by a space
624 624 followed by a space-delimited list of hex nodes of heads on that branch.
625 625 e.g.::
626 626
627 627 default a072279d3f7fd3a4aa7ffa1a5af8efc573e1c896 6dc58916e7c070f678682bfe404d2e2d68291a18
628 628 stable baae3bf31522f41dd5e6d7377d0edd8d1cf3fccc
629 629
630 630 There is no trailing newline.
631 631
632 632 branches
633 633 --------
634 634
635 635 (Legacy command used for discovery in old clients. Clients with ``getbundle``
636 636 use the ``known`` and ``heads`` commands instead.)
637 637
638 638 Obtain ancestor changesets of specific nodes back to a branch point.
639 639
640 640 Despite the name, this command has nothing to do with Mercurial named branches.
641 641 Instead, it is related to DAG branches.
642 642
643 643 The command accepts a ``nodes`` argument, which is a string of space-delimited
644 644 hex nodes.
645 645
646 646 For each node requested, the server will find the first ancestor node that is
647 647 a DAG root or is a merge.
648 648
649 649 Return type is a ``string``. Return value contains lines with result data for
650 650 each requested node. Each line contains space-delimited nodes followed by a
651 651 newline (``\n``). The 4 nodes reported on each line correspond to the requested
652 652 node, the ancestor node found, and its 2 parent nodes (which may be the null
653 653 node).
654 654
655 655 capabilities
656 656 ------------
657 657
658 658 Obtain the capabilities string for the repo.
659 659
660 660 Unlike the ``hello`` command, the capabilities string is not prefixed.
661 661 There is no trailing newline.
662 662
663 663 This command does not accept any arguments. Return type is a ``string``.
664 664
665 665 changegroup
666 666 -----------
667 667
668 668 (Legacy command: use ``getbundle`` instead)
669 669
670 670 Obtain a changegroup version 1 with data for changesets that are
671 671 descendants of client-specified changesets.
672 672
673 673 The ``roots`` arguments contains a list of space-delimited hex nodes.
674 674
675 675 The server responds with a changegroup version 1 containing all
676 676 changesets between the requested root/base nodes and the repo's head nodes
677 677 at the time of the request.
678 678
679 679 The return type is a ``stream``.
680 680
681 681 changegroupsubset
682 682 -----------------
683 683
684 684 (Legacy command: use ``getbundle`` instead)
685 685
686 686 Obtain a changegroup version 1 with data for changesets between
687 687 client specified base and head nodes.
688 688
689 689 The ``bases`` argument contains a list of space-delimited hex nodes.
690 690 The ``heads`` argument contains a list of space-delimited hex nodes.
691 691
692 692 The server responds with a changegroup version 1 containing all
693 693 changesets between the requested base and head nodes at the time of the
694 694 request.
695 695
696 696 The return type is a ``stream``.
697 697
698 698 clonebundles
699 699 ------------
700 700
701 701 Obtains a manifest of bundle URLs available to seed clones.
702 702
703 703 Each returned line contains a URL followed by metadata. See the
704 704 documentation in the ``clonebundles`` extension for more.
705 705
706 706 The return type is a ``string``.
707 707
708 708 getbundle
709 709 ---------
710 710
711 711 Obtain a bundle containing repository data.
712 712
713 713 This command accepts the following arguments:
714 714
715 715 heads
716 716 List of space-delimited hex nodes of heads to retrieve.
717 717 common
718 718 List of space-delimited hex nodes that the client has in common with the
719 719 server.
720 720 obsmarkers
721 721 Boolean indicating whether to include obsolescence markers as part
722 722 of the response. Only works with bundle2.
723 723 bundlecaps
724 724 Comma-delimited set of strings defining client bundle capabilities.
725 725 listkeys
726 726 Comma-delimited list of strings of ``pushkey`` namespaces. For each
727 727 namespace listed, a bundle2 part will be included with the content of
728 728 that namespace.
729 729 cg
730 730 Boolean indicating whether changegroup data is requested.
731 731 cbattempted
732 732 Boolean indicating whether the client attempted to use the *clone bundles*
733 733 feature before performing this request.
734 bookmarks
735 Boolean indicating whether bookmark data is requested.
734 736 phases
735 737 Boolean indicating whether phases data is requested.
736 738
737 739 The return type on success is a ``stream`` where the value is a bundle.
738 740 On the HTTP transport, the response is zlib compressed.
739 741
740 742 If an error occurs, a generic error response can be sent.
741 743
742 744 Unless the client sends a false value for the ``cg`` argument, the returned
743 745 bundle contains a changegroup with the nodes between the specified ``common``
744 746 and ``heads`` nodes. Depending on the command arguments, the type and content
745 747 of the returned bundle can vary significantly.
746 748
747 749 The default behavior is for the server to send a raw changegroup version
748 750 ``01`` response.
749 751
750 752 If the ``bundlecaps`` provided by the client contain a value beginning
751 753 with ``HG2``, a bundle2 will be returned. The bundle2 data may contain
752 754 additional repository data, such as ``pushkey`` namespace values.
753 755
754 756 heads
755 757 -----
756 758
757 759 Returns a list of space-delimited hex nodes of repository heads followed
758 760 by a newline. e.g.
759 761 ``a9eeb3adc7ddb5006c088e9eda61791c777cbf7c 31f91a3da534dc849f0d6bfc00a395a97cf218a1\n``
760 762
761 763 This command does not accept any arguments. The return type is a ``string``.
762 764
763 765 hello
764 766 -----
765 767
766 768 Returns lines describing interesting things about the server in an RFC-822
767 769 like format.
768 770
769 771 Currently, the only line defines the server capabilities. It has the form::
770 772
771 773 capabilities: <value>
772 774
773 775 See above for more about the capabilities string.
774 776
775 777 SSH clients typically issue this command as soon as a connection is
776 778 established.
777 779
778 780 This command does not accept any arguments. The return type is a ``string``.
779 781
780 782 listkeys
781 783 --------
782 784
783 785 List values in a specified ``pushkey`` namespace.
784 786
785 787 The ``namespace`` argument defines the pushkey namespace to operate on.
786 788
787 789 The return type is a ``string``. The value is an encoded dictionary of keys.
788 790
789 791 Key-value pairs are delimited by newlines (``\n``). Within each line, keys and
790 792 values are separated by a tab (``\t``). Keys and values are both strings.
791 793
792 794 lookup
793 795 ------
794 796
795 797 Try to resolve a value to a known repository revision.
796 798
797 799 The ``key`` argument is converted from bytes to an
798 800 ``encoding.localstr`` instance then passed into
799 801 ``localrepository.__getitem__`` in an attempt to resolve it.
800 802
801 803 The return type is a ``string``.
802 804
803 805 Upon successful resolution, returns ``1 <hex node>\n``. On failure,
804 806 returns ``0 <error string>\n``. e.g.::
805 807
806 808 1 273ce12ad8f155317b2c078ec75a4eba507f1fba\n
807 809
808 810 0 unknown revision 'foo'\n
809 811
810 812 known
811 813 -----
812 814
813 815 Determine whether multiple nodes are known.
814 816
815 817 The ``nodes`` argument is a list of space-delimited hex nodes to check
816 818 for existence.
817 819
818 820 The return type is ``string``.
819 821
820 822 Returns a string consisting of ``0``s and ``1``s indicating whether nodes
821 823 are known. If the Nth node specified in the ``nodes`` argument is known,
822 824 a ``1`` will be returned at byte offset N. If the node isn't known, ``0``
823 825 will be present at byte offset N.
824 826
825 827 There is no trailing newline.
826 828
827 829 pushkey
828 830 -------
829 831
830 832 Set a value using the ``pushkey`` protocol.
831 833
832 834 Accepts arguments ``namespace``, ``key``, ``old``, and ``new``, which
833 835 correspond to the pushkey namespace to operate on, the key within that
834 836 namespace to change, the old value (which may be empty), and the new value.
835 837 All arguments are string types.
836 838
837 839 The return type is a ``string``. The value depends on the transport protocol.
838 840
839 841 The SSH transport sends a string encoded integer followed by a newline
840 842 (``\n``) which indicates operation result. The server may send additional
841 843 output on the ``stderr`` stream that should be displayed to the user.
842 844
843 845 The HTTP transport sends a string encoded integer followed by a newline
844 846 followed by additional server output that should be displayed to the user.
845 847 This may include output from hooks, etc.
846 848
847 849 The integer result varies by namespace. ``0`` means an error has occurred
848 850 and there should be additional output to display to the user.
849 851
850 852 stream_out
851 853 ----------
852 854
853 855 Obtain *streaming clone* data.
854 856
855 857 The return type is either a ``string`` or a ``stream``, depending on
856 858 whether the request was fulfilled properly.
857 859
858 860 A return value of ``1\n`` indicates the server is not configured to serve
859 861 this data. If this is seen by the client, they may not have verified the
860 862 ``stream`` capability is set before making the request.
861 863
862 864 A return value of ``2\n`` indicates the server was unable to lock the
863 865 repository to generate data.
864 866
865 867 All other responses are a ``stream`` of bytes. The first line of this data
866 868 contains 2 space-delimited integers corresponding to the path count and
867 869 payload size, respectively::
868 870
869 871 <path count> <payload size>\n
870 872
871 873 The ``<payload size>`` is the total size of path data: it does not include
872 874 the size of the per-path header lines.
873 875
874 876 Following that header are ``<path count>`` entries. Each entry consists of a
875 877 line with metadata followed by raw revlog data. The line consists of::
876 878
877 879 <store path>\0<size>\n
878 880
879 881 The ``<store path>`` is the encoded store path of the data that follows.
880 882 ``<size>`` is the amount of data for this store path/revlog that follows the
881 883 newline.
882 884
883 885 There is no trailer to indicate end of data. Instead, the client should stop
884 886 reading after ``<path count>`` entries are consumed.
885 887
886 888 unbundle
887 889 --------
888 890
889 891 Send a bundle containing data (usually changegroup data) to the server.
890 892
891 893 Accepts the argument ``heads``, which is a space-delimited list of hex nodes
892 894 corresponding to server repository heads observed by the client. This is used
893 895 to detect race conditions and abort push operations before a server performs
894 896 too much work or a client transfers too much data.
895 897
896 898 The request payload consists of a bundle to be applied to the repository,
897 899 similarly to as if :hg:`unbundle` were called.
898 900
899 901 In most scenarios, a special ``push response`` type is returned. This type
900 902 contains an integer describing the change in heads as a result of the
901 903 operation. A value of ``0`` indicates nothing changed. ``1`` means the number
902 904 of heads remained the same. Values ``2`` and larger indicate the number of
903 905 added heads minus 1. e.g. ``3`` means 2 heads were added. Negative values
904 906 indicate the number of fewer heads, also off by 1. e.g. ``-2`` means there
905 907 is 1 fewer head.
906 908
907 909 The encoding of the ``push response`` type varies by transport.
908 910
909 911 For the SSH transport, this type is composed of 2 ``string`` responses: an
910 912 empty response (``0\n``) followed by the integer result value. e.g.
911 913 ``1\n2``. So the full response might be ``0\n1\n2``.
912 914
913 915 For the HTTP transport, the response is a ``string`` type composed of an
914 916 integer result value followed by a newline (``\n``) followed by string
915 917 content holding server output that should be displayed on the client (output
916 918 hooks, etc).
917 919
918 920 In some cases, the server may respond with a ``bundle2`` bundle. In this
919 921 case, the response type is ``stream``. For the HTTP transport, the response
920 922 is zlib compressed.
921 923
922 924 The server may also respond with a generic error type, which contains a string
923 925 indicating the failure.
@@ -1,1069 +1,1070 b''
1 1 # wireproto.py - generic wire protocol support functions
2 2 #
3 3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import hashlib
11 11 import os
12 12 import tempfile
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 nullid,
19 19 )
20 20
21 21 from . import (
22 22 bundle2,
23 23 changegroup as changegroupmod,
24 24 discovery,
25 25 encoding,
26 26 error,
27 27 exchange,
28 28 peer,
29 29 pushkey as pushkeymod,
30 30 pycompat,
31 31 repository,
32 32 streamclone,
33 33 util,
34 34 )
35 35
urlerr = util.urlerr
urlreq = util.urlreq

# Canned error text shared by the code paths that refuse bundle1 operations;
# rendered as "<main>\n(<hint>)\n" for transports without structured errors.
bundle2requiredmain = _('incompatible Mercurial client; bundle2 required')
bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/'
                        'IncompatibleClient')
bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
43 43
class abstractserverproto(object):
    """Abstract class summarizing the server-side protocol API.

    Exists purely as reference and documentation; concrete protocol
    implementations must provide every method below.
    """

    def getargs(self, args):
        """Return the values for the arguments named in <args>.

        Returns a list of values, in the same order as <args>.
        """
        raise NotImplementedError()

    def getfile(self, fp):
        """Write the whole content of a file into a file-like object.

        The file is in the form::

            (<chunk-size>\n<chunk>)+0\n

        chunk size is the ascii version of the int.
        """
        raise NotImplementedError()

    def redirect(self):
        """May set up interception for stdout and stderr.

        See also the `restore` method.
        """
        raise NotImplementedError()

    # If the `redirect` function does install interception, the `restore`
    # function MUST be defined. If interception is not used, this function
    # MUST NOT be defined.
    #
    # left commented here on purpose
    #
    #def restore(self):
    #    """reinstall previous stdout and stderr and return intercepted stdout
    #    """
    #    raise NotImplementedError()
83 83
class remoteiterbatcher(peer.iterbatcher):
    """Batcher that pipelines @batchable calls to a remote peer.

    Queued calls are serialized into a single ``batch`` wire command and
    their results are decoded lazily as the response stream is consumed.
    """

    def __init__(self, remote):
        super(remoteiterbatcher, self).__init__()
        # peer the batched commands will be submitted to
        self._remote = remote

    def __getattr__(self, name):
        # Validate this method is batchable, since submit() only supports
        # batchable methods.
        fn = getattr(self._remote, name)
        if not getattr(fn, 'batchable', None):
            raise error.ProgrammingError('Attempted to batch a non-batchable '
                                         'call to %r' % name)

        return super(remoteiterbatcher, self).__getattr__(name)

    def submit(self):
        """Break the batch request into many patch calls and pipeline them.

        This is mostly valuable over http where request sizes can be
        limited, but can be used in other places as well.
        """
        # 2-tuple of (command, arguments) that represents what will be
        # sent over the wire.
        requests = []

        # 4-tuple of (command, final future, @batchable generator, remote
        # future).
        results = []

        for command, args, opts, finalfuture in self.calls:
            mtd = getattr(self._remote, command)
            # prime the @batchable generator: first yield produces the wire
            # arguments and a future placeholder for the raw response
            batchable = mtd.batchable(mtd.__self__, *args, **opts)

            commandargs, fremote = next(batchable)
            assert fremote
            requests.append((command, commandargs))
            results.append((command, finalfuture, batchable, fremote))

        if requests:
            self._resultiter = self._remote._submitbatch(requests)

        self._results = results

    def results(self):
        # Yields decoded results in the order the calls were queued.
        for command, finalfuture, batchable, remotefuture in self._results:
            # Get the raw result, set it in the remote future, feed it
            # back into the @batchable generator so it can be decoded, and
            # set the result on the final future to this value.
            remoteresult = next(self._resultiter)
            remotefuture.set(remoteresult)
            finalfuture.set(next(batchable))

            # Verify our @batchable generators only emit 2 values.
            try:
                next(batchable)
            except StopIteration:
                pass
            else:
                raise error.ProgrammingError('%s @batchable generator emitted '
                                             'unexpected value count' % command)

            yield finalfuture.value
146 146
# Forward a couple of names from peer to make wireproto interactions
# slightly more sensible: the @batchable decorator and the future
# placeholder used by batched calls.
batchable = peer.batchable
future = peer.future
151 151
152 152 # list of nodes encoding / decoding
153 153
def decodelist(l, sep=' '):
    """Decode a separator-joined string of hex nodes into binary nodes.

    An empty or None input yields an empty list.
    """
    if not l:
        return []
    return [bin(piece) for piece in l.split(sep)]
158 158
def encodelist(l, sep=' '):
    """Encode a list of binary nodes as a ``sep``-joined string of hex nodes.

    A TypeError from ``hex``/``join`` (non-node input) propagates to the
    caller. The previous ``try/except TypeError: raise`` wrapper was a no-op
    re-raise and has been removed as dead code.
    """
    return sep.join(map(hex, l))
164 164
165 165 # batched call argument encoding
166 166
def escapearg(plain):
    """Escape characters that are meaningful in batch command encoding."""
    # ':' must be rewritten first so the escape marker itself is protected.
    for raw, escaped in ((':', ':c'), (',', ':o'), (';', ':s'), ('=', ':e')):
        plain = plain.replace(raw, escaped)
    return plain
173 173
def unescapearg(escaped):
    """Reverse escapearg(); the ':c' marker must be restored last."""
    for token, raw in ((':e', '='), (':s', ';'), (':o', ','), (':c', ':')):
        escaped = escaped.replace(token, raw)
    return escaped
180 180
def encodebatchcmds(req):
    """Return a ``cmds`` argument value for the ``batch`` command."""
    encoded = []
    for op, argsdict in req:
        # Old servers didn't properly unescape argument names, so refuse to
        # send any argument name that escaping would alter.
        assert all(escapearg(k) == k for k in argsdict)

        pairs = ['%s=%s' % (escapearg(k), escapearg(v))
                 for k, v in argsdict.iteritems()]
        encoded.append('%s %s' % (op, ','.join(pairs)))

    return ';'.join(encoded)
195 195
# mapping of options accepted by getbundle and their types
#
# Meant to be extended by extensions. It is extensions responsibility to ensure
# such options are properly processed in exchange.getbundle.
#
# supported types are:
#
# :nodes: list of binary nodes
# :csv: list of comma-separated values
# :scsv: list of comma-separated values return as set
# :plain: string with no transformation needed.
# :boolean: sent as '0'/'1' on the wire, decoded back to a bool
gboptsmap = {'heads': 'nodes',
             'bookmarks': 'boolean',
             'common': 'nodes',
             'obsmarkers': 'boolean',
             'phases': 'boolean',
             'bundlecaps': 'scsv',
             'listkeys': 'csv',
             'cg': 'boolean',
             'cbattempted': 'boolean'}
215 216
216 217 # client side
217 218
class wirepeer(repository.legacypeer):
    """Client-side interface for communicating with a peer repository.

    Methods commonly call wire protocol commands of the same name.

    See also httppeer.py and sshpeer.py for protocol-specific
    implementations of this interface.
    """
    # Begin of basewirepeer interface.

    def iterbatch(self):
        # Batcher that pipelines calls over a single 'batch' command.
        return remoteiterbatcher(self)

    @batchable
    def lookup(self, key):
        # Resolve a symbolic revision remotely; yields a binary node on
        # success, aborts with the server-provided message otherwise.
        self.requirecap('lookup', _('look up remote revision'))
        f = future()
        yield {'key': encoding.fromlocal(key)}, f
        d = f.value
        success, data = d[:-1].split(" ", 1)
        if int(success):
            yield bin(data)
        else:
            self._abort(error.RepoError(data))

    @batchable
    def heads(self):
        # Yields the remote heads as a list of binary nodes.
        f = future()
        yield {}, f
        d = f.value
        try:
            yield decodelist(d[:-1])
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    @batchable
    def known(self, nodes):
        # Yields one boolean per queried node, in request order.
        f = future()
        yield {'nodes': encodelist(nodes)}, f
        d = f.value
        try:
            yield [bool(int(b)) for b in d]
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    @batchable
    def branchmap(self):
        # Yields a {branchname: [binary heads]} mapping decoded from the
        # one-branch-per-line wire format.
        f = future()
        yield {}, f
        d = f.value
        try:
            branchmap = {}
            for branchpart in d.splitlines():
                branchname, branchheads = branchpart.split(' ', 1)
                branchname = encoding.tolocal(urlreq.unquote(branchname))
                branchheads = decodelist(branchheads)
                branchmap[branchname] = branchheads
            yield branchmap
        except TypeError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    @batchable
    def listkeys(self, namespace):
        # Yields the decoded pushkey listing for ``namespace``; an empty
        # dict when the remote lacks the 'pushkey' capability.
        if not self.capable('pushkey'):
            yield {}, None
        f = future()
        self.ui.debug('preparing listkeys for "%s"\n' % namespace)
        yield {'namespace': encoding.fromlocal(namespace)}, f
        d = f.value
        self.ui.debug('received listkey for "%s": %i bytes\n'
                      % (namespace, len(d)))
        yield pushkeymod.decodekeys(d)

    @batchable
    def pushkey(self, namespace, key, old, new):
        # Yields True when the remote accepted the pushkey update; remote
        # output lines are relayed to the local ui.
        if not self.capable('pushkey'):
            yield False, None
        f = future()
        self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
        yield {'namespace': encoding.fromlocal(namespace),
               'key': encoding.fromlocal(key),
               'old': encoding.fromlocal(old),
               'new': encoding.fromlocal(new)}, f
        d = f.value
        d, output = d.split('\n', 1)
        try:
            d = bool(int(d))
        except ValueError:
            raise error.ResponseError(
                _('push failed (unexpected response):'), d)
        for l in output.splitlines(True):
            self.ui.status(_('remote: '), l)
        yield d

    def stream_out(self):
        # Raw stream for streaming clone; parsing is left to the caller.
        return self._callstream('stream_out')

    def getbundle(self, source, **kwargs):
        # Request a bundle from the remote; keyword arguments are encoded
        # onto the wire according to their declared type in gboptsmap.
        kwargs = pycompat.byteskwargs(kwargs)
        self.requirecap('getbundle', _('look up remote changes'))
        opts = {}
        bundlecaps = kwargs.get('bundlecaps')
        if bundlecaps is not None:
            kwargs['bundlecaps'] = sorted(bundlecaps)
        else:
            bundlecaps = () # kwargs could have it to None
        for key, value in kwargs.iteritems():
            if value is None:
                continue
            keytype = gboptsmap.get(key)
            if keytype is None:
                raise error.ProgrammingError(
                    'Unexpectedly None keytype for key %s' % key)
            elif keytype == 'nodes':
                value = encodelist(value)
            elif keytype in ('csv', 'scsv'):
                value = ','.join(value)
            elif keytype == 'boolean':
                value = '%i' % bool(value)
            elif keytype != 'plain':
                raise KeyError('unknown getbundle option type %s'
                               % keytype)
            opts[key] = value
        f = self._callcompressable("getbundle", **pycompat.strkwargs(opts))
        # a bundle2 capability in bundlecaps means the reply is a bundle2
        if any((cap.startswith('HG2') for cap in bundlecaps)):
            return bundle2.getunbundler(self.ui, f)
        else:
            return changegroupmod.cg1unpacker(f, 'UN')

    def unbundle(self, cg, heads, url):
        '''Send cg (a readable file-like object representing the
        changegroup to push, typically a chunkbuffer object) to the
        remote server as a bundle.

        When pushing a bundle10 stream, return an integer indicating the
        result of the push (see changegroup.apply()).

        When pushing a bundle20 stream, return a bundle20 stream.

        `url` is the url the client thinks it's pushing to, which is
        visible to hooks.
        '''

        # send a digest of the heads instead of the full list when the
        # server can verify it ('unbundlehash' capability)
        if heads != ['force'] and self.capable('unbundlehash'):
            heads = encodelist(['hashed',
                                hashlib.sha1(''.join(sorted(heads))).digest()])
        else:
            heads = encodelist(heads)

        if util.safehasattr(cg, 'deltaheader'):
            # this a bundle10, do the old style call sequence
            ret, output = self._callpush("unbundle", cg, heads=heads)
            if ret == "":
                raise error.ResponseError(
                    _('push failed:'), output)
            try:
                ret = int(ret)
            except ValueError:
                raise error.ResponseError(
                    _('push failed (unexpected response):'), ret)

            for l in output.splitlines(True):
                self.ui.status(_('remote: '), l)
        else:
            # bundle2 push. Send a stream, fetch a stream.
            stream = self._calltwowaystream('unbundle', cg, heads=heads)
            ret = bundle2.getunbundler(self.ui, stream)
        return ret

    # End of basewirepeer interface.

    # Begin of baselegacywirepeer interface.

    def branches(self, nodes):
        # Legacy discovery: one branch tuple per queried node.
        n = encodelist(nodes)
        d = self._call("branches", nodes=n)
        try:
            br = [tuple(decodelist(b)) for b in d.splitlines()]
            return br
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    def between(self, pairs):
        # Legacy discovery; queries are chunked to keep request sizes down.
        batch = 8 # avoid giant requests
        r = []
        for i in xrange(0, len(pairs), batch):
            n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
            d = self._call("between", pairs=n)
            try:
                r.extend(l and decodelist(l) or [] for l in d.splitlines())
            except ValueError:
                self._abort(error.ResponseError(_("unexpected response:"), d))
        return r

    def changegroup(self, nodes, kind):
        # Legacy pull: changegroup for everything rooted at ``nodes``.
        n = encodelist(nodes)
        f = self._callcompressable("changegroup", roots=n)
        return changegroupmod.cg1unpacker(f, 'UN')

    def changegroupsubset(self, bases, heads, kind):
        # Legacy pull of a changegroup limited to bases..heads.
        self.requirecap('changegroupsubset', _('look up remote changes'))
        bases = encodelist(bases)
        heads = encodelist(heads)
        f = self._callcompressable("changegroupsubset",
                                   bases=bases, heads=heads)
        return changegroupmod.cg1unpacker(f, 'UN')

    # End of baselegacywirepeer interface.

    def _submitbatch(self, req):
        """run batch request <req> on the server

        Returns an iterator of the raw responses from the server.
        """
        rsp = self._callstream("batch", cmds=encodebatchcmds(req))
        chunk = rsp.read(1024)
        work = [chunk]
        while chunk:
            # buffer until at least one complete ';'-terminated reply is
            # available, then yield every complete reply in the buffer
            while ';' not in chunk and chunk:
                chunk = rsp.read(1024)
                work.append(chunk)
            merged = ''.join(work)
            while ';' in merged:
                one, merged = merged.split(';', 1)
                yield unescapearg(one)
            chunk = rsp.read(1024)
            work = [merged, chunk]
        # the final reply has no trailing ';'
        yield unescapearg(''.join(work))

    def _submitone(self, op, args):
        # Issue a single non-batched command.
        return self._call(op, **pycompat.strkwargs(args))

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        # don't pass optional arguments left at their default value
        opts = {}
        if three is not None:
            opts['three'] = three
        if four is not None:
            opts['four'] = four
        return self._call('debugwireargs', one=one, two=two, **opts)

    def _call(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a simple string.

        returns the server reply as a string."""
        raise NotImplementedError()

    def _callstream(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a stream. Note that if the
        command doesn't return a stream, _callstream behaves
        differently for ssh and http peers.

        returns the server reply as a file like object.
        """
        raise NotImplementedError()

    def _callcompressable(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a stream.

        The stream may have been compressed in some implementations. This
        function takes care of the decompression. This is the only difference
        with _callstream.

        returns the server reply as a file like object.
        """
        raise NotImplementedError()

    def _callpush(self, cmd, fp, **args):
        """execute a <cmd> on server

        The command is expected to be related to a push. Push has a special
        return method.

        returns the server reply as a (ret, output) tuple. ret is either
        empty (error) or a stringified int.
        """
        raise NotImplementedError()

    def _calltwowaystream(self, cmd, fp, **args):
        """execute <cmd> on server

        The command will send a stream to the server and get a stream in reply.
        """
        raise NotImplementedError()

    def _abort(self, exception):
        """clearly abort the wire protocol connection and raise the exception
        """
        raise NotImplementedError()
513 514
514 515 # server side
515 516
516 517 # wire protocol command can either return a string or one of these classes.
class streamres(object):
    """wireproto reply: a binary stream result.

    Wraps a successful call whose payload is streamed back to the client,
    supplied either as a generator (``gen``) or an object exposing a
    ``read(size)`` method (``reader``).

    ``v1compressible`` marks data that may be compressed for "version 1"
    clients (technically: HTTP peers using the application/mercurial-0.1
    media type). Do NOT use the flag on new commands; new clients should
    support a more modern compression mechanism.
    """

    def __init__(self, gen=None, reader=None, v1compressible=False):
        # callers normally supply exactly one of gen/reader
        self.gen = gen
        self.reader = reader
        self.v1compressible = v1compressible
534 535
class pushres(object):
    """wireproto reply: push succeeded.

    ``res`` carries the integer result returned by the push.
    """

    def __init__(self, res):
        self.res = res
542 543
class pusherr(object):
    """wireproto reply: push failed.

    ``res`` carries the error message describing the failure.
    """

    def __init__(self, res):
        self.res = res
550 551
class ooberror(object):
    """wireproto reply: a batch of operations failed out-of-band.

    ``message`` carries the error text produced during the batch call.
    """

    def __init__(self, message):
        self.message = message
559 560
def getdispatchrepo(repo, proto, command):
    """Obtain the repo used for processing wire protocol commands.

    Kept as a dedicated function so extensions can monkeypatch it to make
    commands operate on different repo views in specialized circumstances.
    """
    served = repo.filtered('served')
    return served
568 569
def dispatch(repo, proto, command):
    """Run the wire protocol ``command`` against the served view of repo."""
    dispatchrepo = getdispatchrepo(repo, proto, command)
    handler, argspec = commands[command]
    return handler(dispatchrepo, proto, *proto.getargs(argspec))
574 575
def options(cmd, keys, others):
    """Pop the wanted ``keys`` out of ``others`` and return them as a dict.

    Any entries left over in ``others`` are reported with a warning naming
    ``cmd`` and then ignored.
    """
    opts = {}
    for key in keys:
        if key in others:
            opts[key] = others.pop(key)
    if others:
        util.stderr.write("warning: %s ignored unexpected arguments %s\n"
                          % (cmd, ",".join(others)))
    return opts
585 586
def bundle1allowed(repo, action):
    """Whether a bundle1 operation is allowed from the server.

    Priority is:

    1. server.bundle1gd.<action> (if generaldelta active)
    2. server.bundle1.<action>
    3. server.bundle1gd (if generaldelta active)
    4. server.bundle1
    """
    ui = repo.ui
    gd = 'generaldelta' in repo.requirements

    # Candidate config names, most specific first; the generaldelta
    # variants only apply when the repo uses generaldelta.
    candidates = []
    if gd:
        candidates.append('bundle1gd.%s' % action)
    candidates.append('bundle1.%s' % action)
    if gd:
        candidates.append('bundle1gd')

    for name in candidates:
        v = ui.configbool('server', name)
        if v is not None:
            return v

    return ui.configbool('server', 'bundle1')
614 615
def supportedcompengines(ui, proto, role):
    """Obtain the list of supported compression engines for a request.

    ``role`` is either util.CLIENTROLE or util.SERVERROLE. With no explicit
    config, returns the advertisable engines in default order; with config,
    returns exactly the configured engines in configured order, aborting on
    unknown or unusable names.
    """
    assert role in (util.CLIENTROLE, util.SERVERROLE)

    compengines = util.compengines.supportedwireengines(role)

    # Allow config to override default list and ordering.
    if role == util.SERVERROLE:
        configengines = ui.configlist('server', 'compressionengines')
        config = 'server.compressionengines'
    else:
        # This is currently implemented mainly to facilitate testing. In most
        # cases, the server should be in charge of choosing a compression
        # engine because a server has the most to lose from a sub-optimal
        # choice. (e.g. CPU DoS due to an expensive engine or a network DoS
        # due to poor compression ratio).
        configengines = ui.configlist('experimental',
                                      'clientcompressionengines')
        config = 'experimental.clientcompressionengines'

    # No explicit config. Filter out the ones that aren't supposed to be
    # advertised and return default ordering.
    if not configengines:
        attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
        return [e for e in compengines
                if getattr(e.wireprotosupport(), attr) > 0]

    # If compression engines are listed in the config, assume there is a good
    # reason for it (like server operators wanting to achieve specific
    # performance characteristics). So fail fast if the config references
    # unusable compression engines.
    validnames = set(e.name() for e in compengines)
    invalidnames = set(e for e in configengines if e not in validnames)
    if invalidnames:
        raise error.Abort(_('invalid compression engine defined in %s: %s') %
                          (config, ', '.join(sorted(invalidnames))))

    compengines = [e for e in compengines if e.name() in configengines]
    compengines = sorted(compengines,
                         key=lambda e: configengines.index(e.name()))

    if not compengines:
        # Fixed: the original built the hint with "', '.sorted(validnames)",
        # calling a non-existent str method; that would raise AttributeError
        # instead of producing the intended hint text.
        raise error.Abort(_('%s config option does not specify any known '
                            'compression engines') % config,
                          hint=_('usable compression engines: %s') %
                          ', '.join(sorted(validnames)))

    return compengines
663 664
# Registry of wire protocol commands: name -> (handler, argument spec).
commands = {}

def wireprotocommand(name, args=''):
    """Decorator registering a function as the wire protocol command ``name``.

    ``args`` is the space-separated argument specification that will be
    handed to proto.getargs() at dispatch time.
    """
    def _register(handler):
        commands[name] = (handler, args)
        return handler
    return _register
673 674
@wireprotocommand('batch', 'cmds *')
def batch(repo, proto, cmds, others):
    """Execute several ';'-separated sub-commands and join their results.

    Each entry in ``cmds`` is '<op> <k=v>,<k=v>...' with names and values
    escaped via escapearg(). An out-of-band error aborts the whole batch.
    """
    repo = repo.filtered("served")
    res = []
    for pair in cmds.split(';'):
        op, args = pair.split(' ', 1)
        vals = {}
        for a in args.split(','):
            if a:
                n, v = a.split('=')
                vals[unescapearg(n)] = unescapearg(v)
        func, spec = commands[op]
        if spec:
            keys = spec.split()
            data = {}
            for k in keys:
                if k == '*':
                    # collect every argument not explicitly named in the spec
                    star = {}
                    for key in vals.keys():
                        if key not in keys:
                            star[key] = vals[key]
                    data['*'] = star
                else:
                    data[k] = vals[k]
            result = func(repo, proto, *[data[k] for k in keys])
        else:
            result = func(repo, proto)
        if isinstance(result, ooberror):
            # out-of-band error: stop and return it for the whole batch
            return result
        res.append(escapearg(result))
    return ';'.join(res)
705 706
@wireprotocommand('between', 'pairs')
def between(repo, proto, pairs):
    """Resolve 'between' queries for space-separated, '-'-joined pairs."""
    decoded = [decodelist(pair, '-') for pair in pairs.split(" ")]
    lines = [encodelist(b) + "\n" for b in repo.between(decoded)]
    return "".join(lines)
713 714
@wireprotocommand('branchmap')
def branchmap(repo, proto):
    """Serialize the branchmap, one '<branch> <heads...>' entry per line."""
    entries = []
    for branch, nodes in repo.branchmap().iteritems():
        quoted = urlreq.quote(encoding.fromlocal(branch))
        entries.append('%s %s' % (quoted, encodelist(nodes)))
    return '\n'.join(entries)
723 724
@wireprotocommand('branches', 'nodes')
def branches(repo, proto, nodes):
    """Resolve legacy 'branches' queries for the requested nodes."""
    out = []
    for b in repo.branches(decodelist(nodes)):
        out.append(encodelist(b) + "\n")
    return "".join(out)
731 732
@wireprotocommand('clonebundles', '')
def clonebundles(repo, proto):
    """Server command for returning info for available bundles to seed clones.

    Clients will parse this response and determine what bundle to fetch.

    Extensions may wrap this command to filter or dynamically emit data
    depending on the request. e.g. you could advertise URLs for the closest
    data center given the client's IP address.
    """
    manifest = repo.vfs.tryread('clonebundles.manifest')
    return manifest
743 744
# Capabilities advertised by every wire protocol server regardless of
# configuration; _capabilities() appends the conditional ones.
wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                 'known', 'getbundle', 'unbundlehash', 'batch']
746 747
def _capabilities(repo, proto):
    """return a list of capabilities for a repo

    This function exists to allow extensions to easily wrap capabilities
    computation

    - returns a lists: easy to alter
    - change done here will be propagated to both `capabilities` and `hello`
      command without any other action needed.
    """
    # copy to prevent modification of the global list
    caps = list(wireprotocaps)
    if streamclone.allowservergeneration(repo):
        if repo.ui.configbool('server', 'preferuncompressed'):
            caps.append('stream-preferred')
        requiredformats = repo.requirements & repo.supportedformats
        # if our local revlogs are just revlogv1, add 'stream' cap
        if not requiredformats - {'revlogv1'}:
            caps.append('stream')
        # otherwise, add 'streamreqs' detailing our local revlog format
        else:
            caps.append('streamreqs=%s' % ','.join(sorted(requiredformats)))
    if repo.ui.configbool('experimental', 'bundle2-advertise'):
        capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
        caps.append('bundle2=' + urlreq.quote(capsblob))
    caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))

    # HTTP-only capabilities (header size limits, POST args, media types,
    # and negotiated compression engines)
    if proto.name == 'http':
        caps.append('httpheader=%d' %
                    repo.ui.configint('server', 'maxhttpheaderlen'))
        if repo.ui.configbool('experimental', 'httppostargs'):
            caps.append('httppostargs')

        # FUTURE advertise 0.2rx once support is implemented
        # FUTURE advertise minrx and mintx after consulting config option
        caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')

        compengines = supportedcompengines(repo.ui, proto, util.SERVERROLE)
        if compengines:
            comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
                                 for e in compengines)
            caps.append('compression=%s' % comptypes)

    return caps
791 792
# If you are writing an extension and consider wrapping this function. Wrap
# `_capabilities` instead.
@wireprotocommand('capabilities')
def capabilities(repo, proto):
    """Return the server capabilities as a single space-joined string."""
    caps = _capabilities(repo, proto)
    return ' '.join(caps)
797 798
@wireprotocommand('changegroup', 'roots')
def changegroup(repo, proto, roots):
    """Legacy command: stream a changegroup for everything above ``roots``."""
    nodes = decodelist(roots)
    outgoing = discovery.outgoing(repo, missingroots=nodes,
                                  missingheads=repo.heads())
    cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
    return streamres(reader=cg, v1compressible=True)
805 806
@wireprotocommand('changegroupsubset', 'bases heads')
def changegroupsubset(repo, proto, bases, heads):
    """Legacy command: stream a changegroup limited to bases..heads."""
    bases = decodelist(bases)
    heads = decodelist(heads)
    outgoing = discovery.outgoing(repo, missingroots=bases,
                                  missingheads=heads)
    cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
    return streamres(reader=cg, v1compressible=True)
814 815
@wireprotocommand('debugwireargs', 'one two *')
def debugwireargs(repo, proto, one, two, others):
    """Echo-style debug command exercising wire argument passing."""
    # only accept optional args from the known set
    opts = options('debugwireargs', ['three', 'four'], others)
    return repo.debugwireargs(one, two, **opts)
820 821
@wireprotocommand('getbundle', '*')
def getbundle(repo, proto, others):
    """Serve a bundle for pull; wire arguments are decoded per gboptsmap."""
    opts = options('getbundle', gboptsmap.keys(), others)
    for k, v in opts.iteritems():
        keytype = gboptsmap[k]
        if keytype == 'nodes':
            opts[k] = decodelist(v)
        elif keytype == 'csv':
            opts[k] = list(v.split(','))
        elif keytype == 'scsv':
            opts[k] = set(v.split(','))
        elif keytype == 'boolean':
            # Client should serialize False as '0', which is a non-empty string
            # so it evaluates as a True bool.
            if v == '0':
                opts[k] = False
            else:
                opts[k] = bool(v)
        elif keytype != 'plain':
            raise KeyError('unknown getbundle option type %s'
                           % keytype)

    # refuse bundle1 pulls when the server configuration disallows them
    if not bundle1allowed(repo, 'pull'):
        if not exchange.bundle2requested(opts.get('bundlecaps')):
            if proto.name == 'http':
                return ooberror(bundle2required)
            raise error.Abort(bundle2requiredmain,
                              hint=bundle2requiredhint)

    try:
        if repo.ui.configbool('server', 'disablefullbundle'):
            # Check to see if this is a full clone.
            clheads = set(repo.changelog.heads())
            heads = set(opts.get('heads', set()))
            common = set(opts.get('common', set()))
            common.discard(nullid)
            if not common and clheads == heads:
                raise error.Abort(
                    _('server has pull-based clones disabled'),
                    hint=_('remove --pull if specified or upgrade Mercurial'))

        chunks = exchange.getbundlechunks(repo, 'serve',
                                          **pycompat.strkwargs(opts))
    except error.Abort as exc:
        # cleanly forward Abort error to the client
        if not exchange.bundle2requested(opts.get('bundlecaps')):
            if proto.name == 'http':
                return ooberror(str(exc) + '\n')
            raise # cannot do better for bundle1 + ssh
        # bundle2 request expect a bundle2 reply
        bundler = bundle2.bundle20(repo.ui)
        manargs = [('message', str(exc))]
        advargs = []
        if exc.hint is not None:
            advargs.append(('hint', exc.hint))
        bundler.addpart(bundle2.bundlepart('error:abort',
                                           manargs, advargs))
        return streamres(gen=bundler.getchunks(), v1compressible=True)
    return streamres(gen=chunks, v1compressible=True)
880 881
@wireprotocommand('heads')
def heads(repo, proto):
    """Return the repository heads, encoded and newline-terminated."""
    return encodelist(repo.heads()) + "\n"
885 886
@wireprotocommand('hello')
def hello(repo, proto):
    """Describe the server in an RFC822-like format.

    Currently the only field emitted is "capabilities", a line of the form:

      capabilities: space separated list of tokens
    """
    return "capabilities: %s\n" % (capabilities(repo, proto))
896 897
@wireprotocommand('listkeys', 'namespace')
def listkeys(repo, proto, namespace):
    """Return the pushkey listing for ``namespace`` in wire encoding."""
    keys = repo.listkeys(encoding.tolocal(namespace)).items()
    return pushkeymod.encodekeys(keys)
901 902
@wireprotocommand('lookup', 'key')
def lookup(repo, proto, key):
    """Resolve *key* to a changeset hash.

    Replies "1 <hexnode>" on success, "0 <error text>" on failure.  Any
    exception raised while resolving (unknown revision, ambiguous prefix,
    decode problems, ...) is reported to the client as the failure text
    rather than propagated, hence the deliberately broad except clause.
    """
    try:
        ctx = repo[encoding.tolocal(key)]
    except Exception as inst:
        success, result = 0, str(inst)
    else:
        success, result = 1, ctx.hex()
    return "%d %s\n" % (success, result)
913 914
@wireprotocommand('known', 'nodes *')
def known(repo, proto, nodes, others):
    """For each requested node, answer '1' if the repo knows it, else '0'."""
    flags = repo.known(decodelist(nodes))
    return ''.join('1' if flag else '0' for flag in flags)
917 918
@wireprotocommand('pushkey', 'namespace key old new')
def pushkey(repo, proto, namespace, key, old, new):
    """Update a pushkey entry (bookmarks, phases, ...) on the server.

    Replies with the integer result of repo.pushkey(); protocols that can
    capture output (those exposing a ``restore`` method) additionally get
    the captured server output appended after the status line.
    """
    # compatibility with pre-1.8 clients which were accidentally
    # sending raw binary nodes rather than utf-8-encoded hex
    if len(new) == 20 and util.escapestr(new) != new:
        # looks like it could be a binary node
        try:
            new.decode('utf-8')
            new = encoding.tolocal(new) # but cleanly decodes as UTF-8
        except UnicodeDecodeError:
            pass # binary, leave unmodified
    else:
        new = encoding.tolocal(new) # normal path

    if util.safehasattr(proto, 'restore'):
        # This protocol can buffer server output: redirect it now so that
        # anything written during the pushkey call is captured and can be
        # shipped back to the client after the status line.
        proto.redirect()

        try:
            r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                             encoding.tolocal(old), new) or False
        except error.Abort:
            # report the abort as a plain failure; the captured output
            # (which includes the abort message) is returned below
            r = False

        output = proto.restore()

        return '%s\n%s' % (int(r), output)

    # protocol without output capture: just return the status line
    r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                     encoding.tolocal(old), new)
    return '%s\n' % int(r)
949 950
@wireprotocommand('stream_out')
def stream(repo, proto):
    """Serve a streaming clone of the repository.

    A server that permits streaming clone advertises the "stream"
    capability with the version/flags of the repo it serves; the client
    checks it understands that format before calling this command.
    Replies: '1' when streaming is disallowed, '2' when the repo lock
    could not be taken, otherwise a '0' marker followed by stream chunks.
    """
    if not streamclone.allowservergeneration(repo):
        return '1\n'

    def prefixed(chunks):
        # success marker first, then the raw stream data
        yield '0\n'
        for piece in chunks:
            yield piece

    try:
        # LockError may be raised before the first result is yielded. Don't
        # emit output until we're sure we got the lock successfully.
        chunks = streamclone.generatev1wireproto(repo)
    except error.LockError:
        return '2\n'
    return streamres(gen=prefixed(chunks))
971 972
@wireprotocommand('unbundle', 'heads')
def unbundle(repo, proto, heads):
    """Receive a bundle pushed by the client and apply it to the repo.

    *heads* is the client's view of the server heads, checked before
    applying so a racing push is detected.  Returns a pushres/pusherr for
    bundle1 clients, or a streamres carrying a bundle2 reply (including
    structured error parts) for bundle2 clients.
    """
    their_heads = decodelist(heads)

    try:
        proto.redirect()

        exchange.check_heads(repo, their_heads, 'preparing changes')

        # write bundle data to temporary file because it can be big
        fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
        fp = os.fdopen(fd, pycompat.sysstr('wb+'))
        r = 0
        try:
            proto.getfile(fp)
            fp.seek(0)
            gen = exchange.readbundle(repo.ui, fp, None)
            if (isinstance(gen, changegroupmod.cg1unpacker)
                and not bundle1allowed(repo, 'push')):
                if proto.name == 'http':
                    # need to special case http because stderr do not get to
                    # the http client on failed push so we need to abuse some
                    # other error type to make sure the message get to the
                    # user.
                    return ooberror(bundle2required)
                raise error.Abort(bundle2requiredmain,
                                  hint=bundle2requiredhint)

            r = exchange.unbundle(repo, gen, their_heads, 'serve',
                                  proto._client())
            if util.safehasattr(r, 'addpart'):
                # The return looks streamable, we are in the bundle2 case and
                # should return a stream.
                return streamres(gen=r.getchunks())
            return pushres(r)

        finally:
            fp.close()
            os.unlink(tempname)

    except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
        # handle non-bundle2 case first
        if not getattr(exc, 'duringunbundle2', False):
            # Bare `raise` re-raises the active exception so the except
            # clauses below can dispatch on its concrete type.
            try:
                raise
            except error.Abort:
                # The old code we moved used util.stderr directly.
                # We did not change it to minimise code change.
                # This need to be moved to something proper.
                # Feel free to do it.
                util.stderr.write("abort: %s\n" % exc)
                if exc.hint is not None:
                    util.stderr.write("(%s)\n" % exc.hint)
                return pushres(0)
            except error.PushRaced:
                return pusherr(str(exc))

        # bundle2 case: errors must be reported as parts of a bundle2 reply
        # so the client can decode them.  Salvage any output parts the
        # failed unbundle managed to produce before the error.
        bundler = bundle2.bundle20(repo.ui)
        for out in getattr(exc, '_bundle2salvagedoutput', ()):
            bundler.addpart(out)
        try:
            # Same bare-`raise` dispatch trick as above: re-raise the
            # original exception and let the typed handlers build the
            # matching bundle2 error part.
            try:
                raise
            except error.PushkeyFailed as exc:
                # check client caps
                remotecaps = getattr(exc, '_replycaps', None)
                if (remotecaps is not None
                    and 'pushkey' not in remotecaps.get('error', ())):
                    # no support remote side, fallback to Abort handler.
                    raise
                part = bundler.newpart('error:pushkey')
                part.addparam('in-reply-to', exc.partid)
                if exc.namespace is not None:
                    part.addparam('namespace', exc.namespace, mandatory=False)
                if exc.key is not None:
                    part.addparam('key', exc.key, mandatory=False)
                if exc.new is not None:
                    part.addparam('new', exc.new, mandatory=False)
                if exc.old is not None:
                    part.addparam('old', exc.old, mandatory=False)
                if exc.ret is not None:
                    part.addparam('ret', exc.ret, mandatory=False)
        except error.BundleValueError as exc:
            errpart = bundler.newpart('error:unsupportedcontent')
            if exc.parttype is not None:
                errpart.addparam('parttype', exc.parttype)
            if exc.params:
                errpart.addparam('params', '\0'.join(exc.params))
        except error.Abort as exc:
            manargs = [('message', str(exc))]
            advargs = []
            if exc.hint is not None:
                advargs.append(('hint', exc.hint))
            bundler.addpart(bundle2.bundlepart('error:abort',
                                               manargs, advargs))
        except error.PushRaced as exc:
            bundler.newpart('error:pushraced', [('message', str(exc))])
        return streamres(gen=bundler.getchunks())
General Comments 0
You need to be logged in to leave comments. Login now