py3: use pycompat.strkwargs() to convert kwargs keys to str before passing
Pulkit Goyal
r32896:e14484e7 default
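
On Python 3, a dict with bytes keys cannot be expanded with ** into keyword
arguments, which require str keys; pycompat.strkwargs() converts the keys
before the call. A minimal sketch of the idea (illustrative, not the actual
pycompat implementation):

    def strkwargs(dic):
        # decode bytes keys to str so the dict is usable as **kwargs
        # (conceptually a no-op on Python 2, where str is bytes)
        return {k.decode('latin-1') if isinstance(k, bytes) else k: v
                for k, v in dic.items()}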
@@ -1,2009 +1,2010
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from . import (
19 19 bookmarks as bookmod,
20 20 bundle2,
21 21 changegroup,
22 22 discovery,
23 23 error,
24 24 lock as lockmod,
25 25 obsolete,
26 26 phases,
27 27 pushkey,
28 pycompat,
28 29 scmutil,
29 30 sslutil,
30 31 streamclone,
31 32 url as urlmod,
32 33 util,
33 34 )
34 35
35 36 urlerr = util.urlerr
36 37 urlreq = util.urlreq
37 38
38 39 # Maps bundle version human names to changegroup versions.
39 40 _bundlespeccgversions = {'v1': '01',
40 41 'v2': '02',
41 42 'packed1': 's1',
42 43 'bundle2': '02', #legacy
43 44 }
44 45
45 46 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
46 47 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
47 48
48 49 def parsebundlespec(repo, spec, strict=True, externalnames=False):
49 50 """Parse a bundle string specification into parts.
50 51
51 52 Bundle specifications denote a well-defined bundle/exchange format.
52 53 The content of a given specification should not change over time in
53 54 order to ensure that bundles produced by a newer version of Mercurial are
54 55 readable from an older version.
55 56
56 57 The string currently has the form:
57 58
58 59 <compression>-<type>[;<parameter0>[;<parameter1>]]
59 60
60 61 Where <compression> is one of the supported compression formats
61 62 and <type> is (currently) a version string. A ";" can follow the type and
62 63 all text afterwards is interpreted as URI encoded, ";" delimited key=value
63 64 pairs.
64 65
65 66 If ``strict`` is True (the default) <compression> is required. Otherwise,
66 67 it is optional.
67 68
68 69 If ``externalnames`` is False (the default), the human-centric names will
69 70 be converted to their internal representation.
70 71
71 72 Returns a 3-tuple of (compression, version, parameters). Compression will
72 73 be ``None`` if not in strict mode and a compression isn't defined.
73 74
74 75 An ``InvalidBundleSpecification`` is raised when the specification is
75 76 not syntactically well formed.
76 77
77 78 An ``UnsupportedBundleSpecification`` is raised when the compression or
78 79 bundle type/version is not recognized.
79 80
80 81 Note: this function will likely eventually return a more complex data
81 82 structure, including bundle2 part information.
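
Illustrative examples (not verified output; assumes a repo with default
requirements that supports revlogv1, and the stock compression engines):

parsebundlespec(repo, 'gzip-v2') -> ('GZ', '02', {})
parsebundlespec(repo, 'none-packed1;requirements=revlogv1',
externalnames=True) -> ('none', 'packed1', {'requirements': 'revlogv1'})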
82 83 """
83 84 def parseparams(s):
84 85 if ';' not in s:
85 86 return s, {}
86 87
87 88 params = {}
88 89 version, paramstr = s.split(';', 1)
89 90
90 91 for p in paramstr.split(';'):
91 92 if '=' not in p:
92 93 raise error.InvalidBundleSpecification(
93 94 _('invalid bundle specification: '
94 95 'missing "=" in parameter: %s') % p)
95 96
96 97 key, value = p.split('=', 1)
97 98 key = urlreq.unquote(key)
98 99 value = urlreq.unquote(value)
99 100 params[key] = value
100 101
101 102 return version, params
102 103
103 104
104 105 if strict and '-' not in spec:
105 106 raise error.InvalidBundleSpecification(
106 107 _('invalid bundle specification; '
107 108 'must be prefixed with compression: %s') % spec)
108 109
109 110 if '-' in spec:
110 111 compression, version = spec.split('-', 1)
111 112
112 113 if compression not in util.compengines.supportedbundlenames:
113 114 raise error.UnsupportedBundleSpecification(
114 115 _('%s compression is not supported') % compression)
115 116
116 117 version, params = parseparams(version)
117 118
118 119 if version not in _bundlespeccgversions:
119 120 raise error.UnsupportedBundleSpecification(
120 121 _('%s is not a recognized bundle version') % version)
121 122 else:
122 123 # Value could be just the compression or just the version, in which
123 124 # case some defaults are assumed (but only when not in strict mode).
124 125 assert not strict
125 126
126 127 spec, params = parseparams(spec)
127 128
128 129 if spec in util.compengines.supportedbundlenames:
129 130 compression = spec
130 131 version = 'v1'
131 132 # Generaldelta repos require v2.
132 133 if 'generaldelta' in repo.requirements:
133 134 version = 'v2'
134 135 # Modern compression engines require v2.
135 136 if compression not in _bundlespecv1compengines:
136 137 version = 'v2'
137 138 elif spec in _bundlespeccgversions:
138 139 if spec == 'packed1':
139 140 compression = 'none'
140 141 else:
141 142 compression = 'bzip2'
142 143 version = spec
143 144 else:
144 145 raise error.UnsupportedBundleSpecification(
145 146 _('%s is not a recognized bundle specification') % spec)
146 147
147 148 # Bundle version 1 only supports a known set of compression engines.
148 149 if version == 'v1' and compression not in _bundlespecv1compengines:
149 150 raise error.UnsupportedBundleSpecification(
150 151 _('compression engine %s is not supported on v1 bundles') %
151 152 compression)
152 153
153 154 # The specification for packed1 can optionally declare the data formats
154 155 # required to apply it. If we see this metadata, compare against what the
155 156 # repo supports and error if the bundle isn't compatible.
156 157 if version == 'packed1' and 'requirements' in params:
157 158 requirements = set(params['requirements'].split(','))
158 159 missingreqs = requirements - repo.supportedformats
159 160 if missingreqs:
160 161 raise error.UnsupportedBundleSpecification(
161 162 _('missing support for repository features: %s') %
162 163 ', '.join(sorted(missingreqs)))
163 164
164 165 if not externalnames:
165 166 engine = util.compengines.forbundlename(compression)
166 167 compression = engine.bundletype()[1]
167 168 version = _bundlespeccgversions[version]
168 169 return compression, version, params
169 170
170 171 def readbundle(ui, fh, fname, vfs=None):
171 172 header = changegroup.readexactly(fh, 4)
172 173
173 174 alg = None
174 175 if not fname:
175 176 fname = "stream"
176 177 if not header.startswith('HG') and header.startswith('\0'):
177 178 fh = changegroup.headerlessfixup(fh, header)
178 179 header = "HG10"
179 180 alg = 'UN'
180 181 elif vfs:
181 182 fname = vfs.join(fname)
182 183
183 184 magic, version = header[0:2], header[2:4]
184 185
185 186 if magic != 'HG':
186 187 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
187 188 if version == '10':
188 189 if alg is None:
189 190 alg = changegroup.readexactly(fh, 2)
190 191 return changegroup.cg1unpacker(fh, alg)
191 192 elif version.startswith('2'):
192 193 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
193 194 elif version == 'S1':
194 195 return streamclone.streamcloneapplier(fh)
195 196 else:
196 197 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
197 198
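# For reference, readbundle() above dispatches on the first 4 header bytes
# ('HG' magic + 2-byte version):
#   'HG10' -> changegroup v1, followed by a 2-byte compression code
#             ('BZ', 'GZ' or 'UN') unless the header was missing entirely
#   'HG2x' -> bundle2 (any version starting with '2', e.g. 'HG20')
#   'HGS1' -> stream clone bundle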
198 199 def getbundlespec(ui, fh):
199 200 """Infer the bundlespec from a bundle file handle.
200 201
201 202 The input file handle is seeked and the original seek position is not
202 203 restored.
203 204 """
204 205 def speccompression(alg):
205 206 try:
206 207 return util.compengines.forbundletype(alg).bundletype()[0]
207 208 except KeyError:
208 209 return None
209 210
210 211 b = readbundle(ui, fh, None)
211 212 if isinstance(b, changegroup.cg1unpacker):
212 213 alg = b._type
213 214 if alg == '_truncatedBZ':
214 215 alg = 'BZ'
215 216 comp = speccompression(alg)
216 217 if not comp:
217 218 raise error.Abort(_('unknown compression algorithm: %s') % alg)
218 219 return '%s-v1' % comp
219 220 elif isinstance(b, bundle2.unbundle20):
220 221 if 'Compression' in b.params:
221 222 comp = speccompression(b.params['Compression'])
222 223 if not comp:
223 224 raise error.Abort(_('unknown compression algorithm: %s') % comp)
224 225 else:
225 226 comp = 'none'
226 227
227 228 version = None
228 229 for part in b.iterparts():
229 230 if part.type == 'changegroup':
230 231 version = part.params['version']
231 232 if version in ('01', '02'):
232 233 version = 'v2'
233 234 else:
234 235 raise error.Abort(_('changegroup version %s does not have '
235 236 'a known bundlespec') % version,
236 237 hint=_('try upgrading your Mercurial '
237 238 'client'))
238 239
239 240 if not version:
240 241 raise error.Abort(_('could not identify changegroup version in '
241 242 'bundle'))
242 243
243 244 return '%s-%s' % (comp, version)
244 245 elif isinstance(b, streamclone.streamcloneapplier):
245 246 requirements = streamclone.readbundle1header(fh)[2]
246 247 params = 'requirements=%s' % ','.join(sorted(requirements))
247 248 return 'none-packed1;%s' % urlreq.quote(params)
248 249 else:
249 250 raise error.Abort(_('unknown bundle type: %s') % b)
250 251
251 252 def _computeoutgoing(repo, heads, common):
252 253 """Computes which revs are outgoing given a set of common
253 254 and a set of heads.
254 255
255 256 This is a separate function so extensions can have access to
256 257 the logic.
257 258
258 259 Returns a discovery.outgoing object.
259 260 """
260 261 cl = repo.changelog
261 262 if common:
262 263 hasnode = cl.hasnode
263 264 common = [n for n in common if hasnode(n)]
264 265 else:
265 266 common = [nullid]
266 267 if not heads:
267 268 heads = cl.heads()
268 269 return discovery.outgoing(repo, common, heads)
269 270
270 271 def _forcebundle1(op):
271 272 """return true if a pull/push must use bundle1
272 273
273 274 This function is used to allow testing of the older bundle version"""
274 275 ui = op.repo.ui
275 276 forcebundle1 = False
276 277 # The goal of this config is to allow developers to choose the bundle
277 278 # version used during exchange. This is especially handy during tests.
278 279 # Value is a list of bundle versions to pick from; the highest version
279 280 # should be used.
280 281 #
281 282 # developer config: devel.legacy.exchange
282 283 exchange = ui.configlist('devel', 'legacy.exchange')
283 284 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
284 285 return forcebundle1 or not op.remote.capable('bundle2')
285 286
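# Example (testing only): the devel config read above can be set via hgrc,
# e.g.
#
#   [devel]
#   legacy.exchange = bundle1
#
# With this snippet, 'bundle1' is listed and 'bundle2' is not, so
# _forcebundle1() returns True and the exchange falls back to bundle1.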
286 287 class pushoperation(object):
287 288 """A object that represent a single push operation
288 289
289 290 Its purpose is to carry push related state and very common operations.
290 291
291 292 A new pushoperation should be created at the beginning of each push and
292 293 discarded afterward.
293 294 """
294 295
295 296 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
296 297 bookmarks=()):
297 298 # repo we push from
298 299 self.repo = repo
299 300 self.ui = repo.ui
300 301 # repo we push to
301 302 self.remote = remote
302 303 # force option provided
303 304 self.force = force
304 305 # revs to be pushed (None is "all")
305 306 self.revs = revs
306 307 # bookmark explicitly pushed
307 308 self.bookmarks = bookmarks
308 309 # allow push of new branch
309 310 self.newbranch = newbranch
310 311 # did a local lock get acquired?
311 312 self.locallocked = None
312 313 # step already performed
313 314 # (used to check what steps have been already performed through bundle2)
314 315 self.stepsdone = set()
315 316 # Integer version of the changegroup push result
316 317 # - None means nothing to push
317 318 # - 0 means HTTP error
318 319 # - 1 means we pushed and remote head count is unchanged *or*
319 320 # we have outgoing changesets but refused to push
320 321 # - other values as described by addchangegroup()
321 322 self.cgresult = None
322 323 # Boolean value for the bookmark push
323 324 self.bkresult = None
324 325 # discover.outgoing object (contains common and outgoing data)
325 326 self.outgoing = None
326 327 # all remote topological heads before the push
327 328 self.remoteheads = None
328 329 # Details of the remote branch pre and post push
329 330 #
330 331 # mapping: {'branch': ([remoteheads],
331 332 # [newheads],
332 333 # [unsyncedheads],
333 334 # [discardedheads])}
334 335 # - branch: the branch name
335 336 # - remoteheads: the list of remote heads known locally
336 337 # None if the branch is new
337 338 # - newheads: the new remote heads (known locally) with outgoing pushed
338 339 # - unsyncedheads: the list of remote heads unknown locally.
339 340 # - discardedheads: the list of remote heads made obsolete by the push
340 341 self.pushbranchmap = None
341 342 # testable as a boolean indicating if any nodes are missing locally.
342 343 self.incoming = None
343 344 # phase changes that must be pushed alongside the changesets
344 345 self.outdatedphases = None
345 346 # phase changes that must be pushed if the changeset push fails
346 347 self.fallbackoutdatedphases = None
347 348 # outgoing obsmarkers
348 349 self.outobsmarkers = set()
349 350 # outgoing bookmarks
350 351 self.outbookmarks = []
351 352 # transaction manager
352 353 self.trmanager = None
353 354 # map { pushkey partid -> callback handling failure}
354 355 # used to handle exception from mandatory pushkey part failure
355 356 self.pkfailcb = {}
356 357
357 358 @util.propertycache
358 359 def futureheads(self):
359 360 """future remote heads if the changeset push succeeds"""
360 361 return self.outgoing.missingheads
361 362
362 363 @util.propertycache
363 364 def fallbackheads(self):
364 365 """future remote heads if the changeset push fails"""
365 366 if self.revs is None:
366 367 # no target to push, all common heads are relevant
367 368 return self.outgoing.commonheads
368 369 unfi = self.repo.unfiltered()
369 370 # I want cheads = heads(::missingheads and ::commonheads)
370 371 # (missingheads is revs with secret changeset filtered out)
371 372 #
372 373 # This can be expressed as:
373 374 # cheads = ( (missingheads and ::commonheads)
374 375 # + (commonheads and ::missingheads)
375 376 # )
376 377 #
377 378 # while trying to push we already computed the following:
378 379 # common = (::commonheads)
379 380 # missing = ((commonheads::missingheads) - commonheads)
380 381 #
381 382 # We can pick:
382 383 # * missingheads part of common (::commonheads)
383 384 common = self.outgoing.common
384 385 nm = self.repo.changelog.nodemap
385 386 cheads = [node for node in self.revs if nm[node] in common]
386 387 # and
387 388 # * commonheads parents on missing
388 389 revset = unfi.set('%ln and parents(roots(%ln))',
389 390 self.outgoing.commonheads,
390 391 self.outgoing.missing)
391 392 cheads.extend(c.node() for c in revset)
392 393 return cheads
393 394
394 395 @property
395 396 def commonheads(self):
396 397 """set of all common heads after changeset bundle push"""
397 398 if self.cgresult:
398 399 return self.futureheads
399 400 else:
400 401 return self.fallbackheads
401 402
402 403 # mapping of messages used when pushing bookmarks
403 404 bookmsgmap = {'update': (_("updating bookmark %s\n"),
404 405 _('updating bookmark %s failed!\n')),
405 406 'export': (_("exporting bookmark %s\n"),
406 407 _('exporting bookmark %s failed!\n')),
407 408 'delete': (_("deleting remote bookmark %s\n"),
408 409 _('deleting remote bookmark %s failed!\n')),
409 410 }
410 411
411 412
412 413 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
413 414 opargs=None):
414 415 '''Push outgoing changesets (limited by revs) from a local
415 416 repository to remote. Returns the pushoperation object; its cgresult is an integer:
416 417 - None means nothing to push
417 418 - 0 means HTTP error
418 419 - 1 means we pushed and remote head count is unchanged *or*
419 420 we have outgoing changesets but refused to push
420 421 - other values as described by addchangegroup()
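
Illustrative usage (hg.peer is the usual way to obtain a peer; dest is a
hypothetical destination path or URL):

from mercurial import hg
other = hg.peer(repo, {}, dest)
pushop = push(repo, other, force=False)
# pushop.cgresult / pushop.bkresult carry the outcome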
421 422 '''
422 423 if opargs is None:
423 424 opargs = {}
424 425 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
425 426 **opargs)
426 427 if pushop.remote.local():
427 428 missing = (set(pushop.repo.requirements)
428 429 - pushop.remote.local().supported)
429 430 if missing:
430 431 msg = _("required features are not"
431 432 " supported in the destination:"
432 433 " %s") % (', '.join(sorted(missing)))
433 434 raise error.Abort(msg)
434 435
435 436 # there are two ways to push to remote repo:
436 437 #
437 438 # addchangegroup assumes local user can lock remote
438 439 # repo (local filesystem, old ssh servers).
439 440 #
440 441 # unbundle assumes local user cannot lock remote repo (new ssh
441 442 # servers, http servers).
442 443
443 444 if not pushop.remote.canpush():
444 445 raise error.Abort(_("destination does not support push"))
445 446 # get local lock as we might write phase data
446 447 localwlock = locallock = None
447 448 try:
448 449 # bundle2 push may receive a reply bundle touching bookmarks or other
449 450 # things requiring the wlock. Take it now to ensure proper ordering.
450 451 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
451 452 if (not _forcebundle1(pushop)) and maypushback:
452 453 localwlock = pushop.repo.wlock()
453 454 locallock = pushop.repo.lock()
454 455 pushop.locallocked = True
455 456 except IOError as err:
456 457 pushop.locallocked = False
457 458 if err.errno != errno.EACCES:
458 459 raise
459 460 # source repo cannot be locked.
460 461 # We do not abort the push, but just disable the local phase
461 462 # synchronisation.
462 463 msg = 'cannot lock source repository: %s\n' % err
463 464 pushop.ui.debug(msg)
464 465 try:
465 466 if pushop.locallocked:
466 467 pushop.trmanager = transactionmanager(pushop.repo,
467 468 'push-response',
468 469 pushop.remote.url())
469 470 pushop.repo.checkpush(pushop)
470 471 lock = None
471 472 unbundle = pushop.remote.capable('unbundle')
472 473 if not unbundle:
473 474 lock = pushop.remote.lock()
474 475 try:
475 476 _pushdiscovery(pushop)
476 477 if not _forcebundle1(pushop):
477 478 _pushbundle2(pushop)
478 479 _pushchangeset(pushop)
479 480 _pushsyncphase(pushop)
480 481 _pushobsolete(pushop)
481 482 _pushbookmark(pushop)
482 483 finally:
483 484 if lock is not None:
484 485 lock.release()
485 486 if pushop.trmanager:
486 487 pushop.trmanager.close()
487 488 finally:
488 489 if pushop.trmanager:
489 490 pushop.trmanager.release()
490 491 if locallock is not None:
491 492 locallock.release()
492 493 if localwlock is not None:
493 494 localwlock.release()
494 495
495 496 return pushop
496 497
497 498 # list of steps to perform discovery before push
498 499 pushdiscoveryorder = []
499 500
500 501 # Mapping between step name and function
501 502 #
502 503 # This exists to help extensions wrap steps if necessary
503 504 pushdiscoverymapping = {}
504 505
505 506 def pushdiscovery(stepname):
506 507 """decorator for function performing discovery before push
507 508
508 509 The function is added to the step -> function mapping and appended to the
509 510 list of steps. Beware that decorated functions will be added in order (this
510 511 may matter).
511 512
512 513 You can only use this decorator for a new step; if you want to wrap a step
513 514 from an extension, change the pushdiscoverymapping dictionary directly."""
514 515 def dec(func):
515 516 assert stepname not in pushdiscoverymapping
516 517 pushdiscoverymapping[stepname] = func
517 518 pushdiscoveryorder.append(stepname)
518 519 return func
519 520 return dec
520 521
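# Illustrative sketch of registering a discovery step (hypothetical step
# name, not part of this change):
#
#   @pushdiscovery('mystep')
#   def _pushdiscoverymystep(pushop):
#       # examine or augment pushop state before the push proceeds
#       pass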
521 522 def _pushdiscovery(pushop):
522 523 """Run all discovery steps"""
523 524 for stepname in pushdiscoveryorder:
524 525 step = pushdiscoverymapping[stepname]
525 526 step(pushop)
526 527
527 528 @pushdiscovery('changeset')
528 529 def _pushdiscoverychangeset(pushop):
529 530 """discover the changeset that need to be pushed"""
530 531 fci = discovery.findcommonincoming
531 532 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
532 533 common, inc, remoteheads = commoninc
533 534 fco = discovery.findcommonoutgoing
534 535 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
535 536 commoninc=commoninc, force=pushop.force)
536 537 pushop.outgoing = outgoing
537 538 pushop.remoteheads = remoteheads
538 539 pushop.incoming = inc
539 540
540 541 @pushdiscovery('phase')
541 542 def _pushdiscoveryphase(pushop):
542 543 """discover the phase that needs to be pushed
543 544
544 545 (computed for both success and failure case for changesets push)"""
545 546 outgoing = pushop.outgoing
546 547 unfi = pushop.repo.unfiltered()
547 548 remotephases = pushop.remote.listkeys('phases')
548 549 publishing = remotephases.get('publishing', False)
549 550 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
550 551 and remotephases # server supports phases
551 552 and not pushop.outgoing.missing # no changesets to be pushed
552 553 and publishing):
553 554 # When:
554 555 # - this is a subrepo push
555 556 # - and the remote supports phases
556 557 # - and no changesets are to be pushed
557 558 # - and the remote is publishing
558 559 # We may be in the issue 3871 case!
559 560 # We drop the phase synchronisation that is otherwise done as a
560 561 # courtesy, to avoid publishing changesets that are possibly still
561 562 # draft on the remote.
562 563 remotephases = {'publishing': 'True'}
563 564 ana = phases.analyzeremotephases(pushop.repo,
564 565 pushop.fallbackheads,
565 566 remotephases)
566 567 pheads, droots = ana
567 568 extracond = ''
568 569 if not publishing:
569 570 extracond = ' and public()'
570 571 revset = 'heads((%%ln::%%ln) %s)' % extracond
571 572 # Get the list of all revs that are draft on the remote but public here.
572 573 # XXX Beware that the revset breaks if droots is not strictly
573 574 # XXX roots; we may want to ensure it is, but that is costly
574 575 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
575 576 if not outgoing.missing:
576 577 future = fallback
577 578 else:
578 579 # add the changesets we are going to push as draft
579 580 #
580 581 # should not be necessary for a publishing server, but because of an
581 582 # issue fixed in xxxxx we have to do it anyway.
582 583 fdroots = list(unfi.set('roots(%ln + %ln::)',
583 584 outgoing.missing, droots))
584 585 fdroots = [f.node() for f in fdroots]
585 586 future = list(unfi.set(revset, fdroots, pushop.futureheads))
586 587 pushop.outdatedphases = future
587 588 pushop.fallbackoutdatedphases = fallback
588 589
589 590 @pushdiscovery('obsmarker')
590 591 def _pushdiscoveryobsmarkers(pushop):
591 592 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
592 593 and pushop.repo.obsstore
593 594 and 'obsolete' in pushop.remote.listkeys('namespaces')):
594 595 repo = pushop.repo
595 596 # very naive computation, which can be quite expensive on a big repo.
596 597 # However, evolution is currently slow on such repos anyway.
597 598 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
598 599 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
599 600
600 601 @pushdiscovery('bookmarks')
601 602 def _pushdiscoverybookmarks(pushop):
602 603 ui = pushop.ui
603 604 repo = pushop.repo.unfiltered()
604 605 remote = pushop.remote
605 606 ui.debug("checking for updated bookmarks\n")
606 607 ancestors = ()
607 608 if pushop.revs:
608 609 revnums = map(repo.changelog.rev, pushop.revs)
609 610 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
610 611 remotebookmark = remote.listkeys('bookmarks')
611 612
612 613 explicit = set([repo._bookmarks.expandname(bookmark)
613 614 for bookmark in pushop.bookmarks])
614 615
615 616 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
616 617 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
617 618
618 619 def safehex(x):
619 620 if x is None:
620 621 return x
621 622 return hex(x)
622 623
623 624 def hexifycompbookmarks(bookmarks):
624 625 for b, scid, dcid in bookmarks:
625 626 yield b, safehex(scid), safehex(dcid)
626 627
627 628 comp = [hexifycompbookmarks(marks) for marks in comp]
628 629 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
629 630
630 631 for b, scid, dcid in advsrc:
631 632 if b in explicit:
632 633 explicit.remove(b)
633 634 if not ancestors or repo[scid].rev() in ancestors:
634 635 pushop.outbookmarks.append((b, dcid, scid))
635 636 # search for added bookmarks
636 637 for b, scid, dcid in addsrc:
637 638 if b in explicit:
638 639 explicit.remove(b)
639 640 pushop.outbookmarks.append((b, '', scid))
640 641 # search for overwritten bookmarks
641 642 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
642 643 if b in explicit:
643 644 explicit.remove(b)
644 645 pushop.outbookmarks.append((b, dcid, scid))
645 646 # search for bookmarks to delete
646 647 for b, scid, dcid in adddst:
647 648 if b in explicit:
648 649 explicit.remove(b)
649 650 # treat as "deleted locally"
650 651 pushop.outbookmarks.append((b, dcid, ''))
651 652 # identical bookmarks shouldn't get reported
652 653 for b, scid, dcid in same:
653 654 if b in explicit:
654 655 explicit.remove(b)
655 656
656 657 if explicit:
657 658 explicit = sorted(explicit)
658 659 # we should probably list all of them
659 660 ui.warn(_('bookmark %s does not exist on the local '
660 661 'or remote repository!\n') % explicit[0])
661 662 pushop.bkresult = 2
662 663
663 664 pushop.outbookmarks.sort()
664 665
665 666 def _pushcheckoutgoing(pushop):
666 667 outgoing = pushop.outgoing
667 668 unfi = pushop.repo.unfiltered()
668 669 if not outgoing.missing:
669 670 # nothing to push
670 671 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
671 672 return False
672 673 # something to push
673 674 if not pushop.force:
674 675 # if repo.obsstore is empty --> no obsolete markers;
675 676 # then we can skip the iteration
676 677 if unfi.obsstore:
677 678 # these messages are here because of the 80-char line limit
678 679 mso = _("push includes obsolete changeset: %s!")
679 680 mst = {"unstable": _("push includes unstable changeset: %s!"),
680 681 "bumped": _("push includes bumped changeset: %s!"),
681 682 "divergent": _("push includes divergent changeset: %s!")}
682 683 # If there is at least one obsolete or unstable
683 684 # changeset in missing, at least one of the
684 685 # missing heads will be obsolete or unstable.
685 686 # So checking heads only is ok
686 687 for node in outgoing.missingheads:
687 688 ctx = unfi[node]
688 689 if ctx.obsolete():
689 690 raise error.Abort(mso % ctx)
690 691 elif ctx.troubled():
691 692 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
692 693
693 694 discovery.checkheads(pushop)
694 695 return True
695 696
696 697 # List of names of steps to perform for an outgoing bundle2, order matters.
697 698 b2partsgenorder = []
698 699
699 700 # Mapping between step name and function
700 701 #
701 702 # This exists to help extensions wrap steps if necessary
702 703 b2partsgenmapping = {}
703 704
704 705 def b2partsgenerator(stepname, idx=None):
705 706 """decorator for function generating bundle2 part
706 707
707 708 The function is added to the step -> function mapping and appended to the
708 709 list of steps. Beware that decorated functions will be added in order
709 710 (this may matter).
710 711
711 712 You can only use this decorator for new steps; if you want to wrap a step
712 713 from an extension, change the b2partsgenmapping dictionary directly."""
713 714 def dec(func):
714 715 assert stepname not in b2partsgenmapping
715 716 b2partsgenmapping[stepname] = func
716 717 if idx is None:
717 718 b2partsgenorder.append(stepname)
718 719 else:
719 720 b2partsgenorder.insert(idx, stepname)
720 721 return func
721 722 return dec
722 723
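# Illustrative sketch of a part generator (hypothetical part name; a
# returned callable is collected as a reply handler by _pushbundle2()):
#
#   @b2partsgenerator('mypart')
#   def _pushb2mypart(pushop, bundler):
#       if 'mypart' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('mypart')
#       bundler.newpart('mypart', data=b'payload')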
723 724 def _pushb2ctxcheckheads(pushop, bundler):
724 725 """Generate race condition checking parts
725 726
726 727 Exists as an independent function to aid extensions
727 728 """
728 729 # * with 'force', do not check for push races,
729 730 # * if we don't push anything, there is nothing to check.
730 731 if not pushop.force and pushop.outgoing.missingheads:
731 732 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
732 733 if not allowunrelated:
733 734 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
734 735 else:
735 736 affected = set()
736 737 for branch, heads in pushop.pushbranchmap.iteritems():
737 738 remoteheads, newheads, unsyncedheads, discardedheads = heads
738 739 if remoteheads is not None:
739 740 remote = set(remoteheads)
740 741 affected |= set(discardedheads) & remote
741 742 affected |= remote - set(newheads)
742 743 if affected:
743 744 data = iter(sorted(affected))
744 745 bundler.newpart('check:updated-heads', data=data)
745 746
746 747 @b2partsgenerator('changeset')
747 748 def _pushb2ctx(pushop, bundler):
748 749 """handle changegroup push through bundle2
749 750
750 751 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
751 752 """
752 753 if 'changesets' in pushop.stepsdone:
753 754 return
754 755 pushop.stepsdone.add('changesets')
755 756 # Send known heads to the server for race detection.
756 757 if not _pushcheckoutgoing(pushop):
757 758 return
758 759 pushop.repo.prepushoutgoinghooks(pushop)
759 760
760 761 _pushb2ctxcheckheads(pushop, bundler)
761 762
762 763 b2caps = bundle2.bundle2caps(pushop.remote)
763 764 version = '01'
764 765 cgversions = b2caps.get('changegroup')
765 766 if cgversions: # 3.1 and 3.2 ship with an empty value
766 767 cgversions = [v for v in cgversions
767 768 if v in changegroup.supportedoutgoingversions(
768 769 pushop.repo)]
769 770 if not cgversions:
770 771 raise ValueError(_('no common changegroup version'))
771 772 version = max(cgversions)
772 773 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
773 774 pushop.outgoing,
774 775 version=version)
775 776 cgpart = bundler.newpart('changegroup', data=cg)
776 777 if cgversions:
777 778 cgpart.addparam('version', version)
778 779 if 'treemanifest' in pushop.repo.requirements:
779 780 cgpart.addparam('treemanifest', '1')
780 781 def handlereply(op):
781 782 """extract addchangegroup returns from server reply"""
782 783 cgreplies = op.records.getreplies(cgpart.id)
783 784 assert len(cgreplies['changegroup']) == 1
784 785 pushop.cgresult = cgreplies['changegroup'][0]['return']
785 786 return handlereply
786 787
787 788 @b2partsgenerator('phase')
788 789 def _pushb2phases(pushop, bundler):
789 790 """handle phase push through bundle2"""
790 791 if 'phases' in pushop.stepsdone:
791 792 return
792 793 b2caps = bundle2.bundle2caps(pushop.remote)
793 794 if not 'pushkey' in b2caps:
794 795 return
795 796 pushop.stepsdone.add('phases')
796 797 part2node = []
797 798
798 799 def handlefailure(pushop, exc):
799 800 targetid = int(exc.partid)
800 801 for partid, node in part2node:
801 802 if partid == targetid:
802 803 raise error.Abort(_('updating %s to public failed') % node)
803 804
804 805 enc = pushkey.encode
805 806 for newremotehead in pushop.outdatedphases:
806 807 part = bundler.newpart('pushkey')
807 808 part.addparam('namespace', enc('phases'))
808 809 part.addparam('key', enc(newremotehead.hex()))
809 810 part.addparam('old', enc(str(phases.draft)))
810 811 part.addparam('new', enc(str(phases.public)))
811 812 part2node.append((part.id, newremotehead))
812 813 pushop.pkfailcb[part.id] = handlefailure
813 814
814 815 def handlereply(op):
815 816 for partid, node in part2node:
816 817 partrep = op.records.getreplies(partid)
817 818 results = partrep['pushkey']
818 819 assert len(results) <= 1
819 820 msg = None
820 821 if not results:
821 822 msg = _('server ignored update of %s to public!\n') % node
822 823 elif not int(results[0]['return']):
823 824 msg = _('updating %s to public failed!\n') % node
824 825 if msg is not None:
825 826 pushop.ui.warn(msg)
826 827 return handlereply
827 828
828 829 @b2partsgenerator('obsmarkers')
829 830 def _pushb2obsmarkers(pushop, bundler):
830 831 if 'obsmarkers' in pushop.stepsdone:
831 832 return
832 833 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
833 834 if obsolete.commonversion(remoteversions) is None:
834 835 return
835 836 pushop.stepsdone.add('obsmarkers')
836 837 if pushop.outobsmarkers:
837 838 markers = sorted(pushop.outobsmarkers)
838 839 bundle2.buildobsmarkerspart(bundler, markers)
839 840
840 841 @b2partsgenerator('bookmarks')
841 842 def _pushb2bookmarks(pushop, bundler):
842 843 """handle bookmark push through bundle2"""
843 844 if 'bookmarks' in pushop.stepsdone:
844 845 return
845 846 b2caps = bundle2.bundle2caps(pushop.remote)
846 847 if 'pushkey' not in b2caps:
847 848 return
848 849 pushop.stepsdone.add('bookmarks')
849 850 part2book = []
850 851 enc = pushkey.encode
851 852
852 853 def handlefailure(pushop, exc):
853 854 targetid = int(exc.partid)
854 855 for partid, book, action in part2book:
855 856 if partid == targetid:
856 857 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
857 858 # we should not be called for parts we did not generate
858 859 assert False
859 860
860 861 for book, old, new in pushop.outbookmarks:
861 862 part = bundler.newpart('pushkey')
862 863 part.addparam('namespace', enc('bookmarks'))
863 864 part.addparam('key', enc(book))
864 865 part.addparam('old', enc(old))
865 866 part.addparam('new', enc(new))
866 867 action = 'update'
867 868 if not old:
868 869 action = 'export'
869 870 elif not new:
870 871 action = 'delete'
871 872 part2book.append((part.id, book, action))
872 873 pushop.pkfailcb[part.id] = handlefailure
873 874
874 875 def handlereply(op):
875 876 ui = pushop.ui
876 877 for partid, book, action in part2book:
877 878 partrep = op.records.getreplies(partid)
878 879 results = partrep['pushkey']
879 880 assert len(results) <= 1
880 881 if not results:
881 882 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
882 883 else:
883 884 ret = int(results[0]['return'])
884 885 if ret:
885 886 ui.status(bookmsgmap[action][0] % book)
886 887 else:
887 888 ui.warn(bookmsgmap[action][1] % book)
888 889 if pushop.bkresult is not None:
889 890 pushop.bkresult = 1
890 891 return handlereply
891 892
892 893
893 894 def _pushbundle2(pushop):
894 895 """push data to the remote using bundle2
895 896
896 897 The only currently supported type of data is changegroup but this will
897 898 evolve in the future."""
898 899 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
899 900 pushback = (pushop.trmanager
900 901 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
901 902
902 903 # create reply capability
903 904 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
904 905 allowpushback=pushback))
905 906 bundler.newpart('replycaps', data=capsblob)
906 907 replyhandlers = []
907 908 for partgenname in b2partsgenorder:
908 909 partgen = b2partsgenmapping[partgenname]
909 910 ret = partgen(pushop, bundler)
910 911 if callable(ret):
911 912 replyhandlers.append(ret)
912 913 # do not push if nothing to push
913 914 if bundler.nbparts <= 1:
914 915 return
915 916 stream = util.chunkbuffer(bundler.getchunks())
916 917 try:
917 918 try:
918 919 reply = pushop.remote.unbundle(
919 920 stream, ['force'], pushop.remote.url())
920 921 except error.BundleValueError as exc:
921 922 raise error.Abort(_('missing support for %s') % exc)
922 923 try:
923 924 trgetter = None
924 925 if pushback:
925 926 trgetter = pushop.trmanager.transaction
926 927 op = bundle2.processbundle(pushop.repo, reply, trgetter)
927 928 except error.BundleValueError as exc:
928 929 raise error.Abort(_('missing support for %s') % exc)
929 930 except bundle2.AbortFromPart as exc:
930 931 pushop.ui.status(_('remote: %s\n') % exc)
931 932 if exc.hint is not None:
932 933 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
933 934 raise error.Abort(_('push failed on remote'))
934 935 except error.PushkeyFailed as exc:
935 936 partid = int(exc.partid)
936 937 if partid not in pushop.pkfailcb:
937 938 raise
938 939 pushop.pkfailcb[partid](pushop, exc)
939 940 for rephand in replyhandlers:
940 941 rephand(op)
941 942
942 943 def _pushchangeset(pushop):
943 944 """Make the actual push of changeset bundle to remote repo"""
944 945 if 'changesets' in pushop.stepsdone:
945 946 return
946 947 pushop.stepsdone.add('changesets')
947 948 if not _pushcheckoutgoing(pushop):
948 949 return
949 950 pushop.repo.prepushoutgoinghooks(pushop)
950 951 outgoing = pushop.outgoing
951 952 unbundle = pushop.remote.capable('unbundle')
952 953 # TODO: get bundlecaps from remote
953 954 bundlecaps = None
954 955 # create a changegroup from local
955 956 if pushop.revs is None and not (outgoing.excluded
956 957 or pushop.repo.changelog.filteredrevs):
957 958 # push everything,
958 959 # use the fast path, no race possible on push
959 960 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
960 961 cg = changegroup.getsubset(pushop.repo,
961 962 outgoing,
962 963 bundler,
963 964 'push',
964 965 fastpath=True)
965 966 else:
966 967 cg = changegroup.getchangegroup(pushop.repo, 'push', outgoing,
967 968 bundlecaps=bundlecaps)
968 969
969 970 # apply changegroup to remote
970 971 if unbundle:
971 972 # local repo finds heads on server, finds out what
972 973 # revs it must push. once revs transferred, if server
973 974 # finds it has different heads (someone else won
974 975 # commit/push race), server aborts.
975 976 if pushop.force:
976 977 remoteheads = ['force']
977 978 else:
978 979 remoteheads = pushop.remoteheads
979 980 # ssh: return remote's addchangegroup()
980 981 # http: return remote's addchangegroup() or 0 for error
981 982 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
982 983 pushop.repo.url())
983 984 else:
984 985 # we return an integer indicating remote head count
985 986 # change
986 987 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
987 988 pushop.repo.url())
988 989
989 990 def _pushsyncphase(pushop):
990 991 """synchronise phase information locally and remotely"""
991 992 cheads = pushop.commonheads
992 993 # even when we don't push, exchanging phase data is useful
993 994 remotephases = pushop.remote.listkeys('phases')
994 995 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
995 996 and remotephases # server supports phases
996 997 and pushop.cgresult is None # nothing was pushed
997 998 and remotephases.get('publishing', False)):
998 999 # When:
999 1000 # - this is a subrepo push
1000 1001 # - and the remote supports phases
1001 1002 # - and no changeset was pushed
1002 1003 # - and the remote is publishing
1003 1004 # We may be in the issue 3871 case!
1004 1005 # We drop the phase synchronisation that is otherwise done as a
1005 1006 # courtesy, to avoid publishing changesets that are possibly still
1006 1007 # draft on the remote.
1007 1008 remotephases = {'publishing': 'True'}
1008 1009 if not remotephases: # old server, or public-only reply from non-publishing
1009 1010 _localphasemove(pushop, cheads)
1010 1011 # don't push any phase data as there is nothing to push
1011 1012 else:
1012 1013 ana = phases.analyzeremotephases(pushop.repo, cheads,
1013 1014 remotephases)
1014 1015 pheads, droots = ana
1015 1016 ### Apply remote phase on local
1016 1017 if remotephases.get('publishing', False):
1017 1018 _localphasemove(pushop, cheads)
1018 1019 else: # publish = False
1019 1020 _localphasemove(pushop, pheads)
1020 1021 _localphasemove(pushop, cheads, phases.draft)
1021 1022 ### Apply local phase on remote
1022 1023
1023 1024 if pushop.cgresult:
1024 1025 if 'phases' in pushop.stepsdone:
1025 1026 # phases already pushed through bundle2
1026 1027 return
1027 1028 outdated = pushop.outdatedphases
1028 1029 else:
1029 1030 outdated = pushop.fallbackoutdatedphases
1030 1031
1031 1032 pushop.stepsdone.add('phases')
1032 1033
1033 1034 # filter heads already turned public by the push
1034 1035 outdated = [c for c in outdated if c.node() not in pheads]
1035 1036 # fallback to independent pushkey command
1036 1037 for newremotehead in outdated:
1037 1038 r = pushop.remote.pushkey('phases',
1038 1039 newremotehead.hex(),
1039 1040 str(phases.draft),
1040 1041 str(phases.public))
1041 1042 if not r:
1042 1043 pushop.ui.warn(_('updating %s to public failed!\n')
1043 1044 % newremotehead)
1044 1045
1045 1046 def _localphasemove(pushop, nodes, phase=phases.public):
1046 1047 """move <nodes> to <phase> in the local source repo"""
1047 1048 if pushop.trmanager:
1048 1049 phases.advanceboundary(pushop.repo,
1049 1050 pushop.trmanager.transaction(),
1050 1051 phase,
1051 1052 nodes)
1052 1053 else:
1053 1054 # repo is not locked, do not change any phases!
1054 1055 # Inform the user that phases should have been moved when
1055 1056 # applicable.
1056 1057 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1057 1058 phasestr = phases.phasenames[phase]
1058 1059 if actualmoves:
1059 1060 pushop.ui.status(_('cannot lock source repo, skipping '
1060 1061 'local %s phase update\n') % phasestr)
1061 1062
1062 1063 def _pushobsolete(pushop):
1063 1064 """utility function to push obsolete markers to a remote"""
1064 1065 if 'obsmarkers' in pushop.stepsdone:
1065 1066 return
1066 1067 repo = pushop.repo
1067 1068 remote = pushop.remote
1068 1069 pushop.stepsdone.add('obsmarkers')
1069 1070 if pushop.outobsmarkers:
1070 1071 pushop.ui.debug('try to push obsolete markers to remote\n')
1071 1072 rslts = []
1072 1073 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1073 1074 for key in sorted(remotedata, reverse=True):
1074 1075 # reverse sort to ensure we end with dump0
1075 1076 data = remotedata[key]
1076 1077 rslts.append(remote.pushkey('obsolete', key, '', data))
1077 1078 if [r for r in rslts if not r]:
1078 1079 msg = _('failed to push some obsolete markers!\n')
1079 1080 repo.ui.warn(msg)
1080 1081
1081 1082 def _pushbookmark(pushop):
1082 1083 """Update bookmark position on remote"""
1083 1084 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1084 1085 return
1085 1086 pushop.stepsdone.add('bookmarks')
1086 1087 ui = pushop.ui
1087 1088 remote = pushop.remote
1088 1089
1089 1090 for b, old, new in pushop.outbookmarks:
1090 1091 action = 'update'
1091 1092 if not old:
1092 1093 action = 'export'
1093 1094 elif not new:
1094 1095 action = 'delete'
1095 1096 if remote.pushkey('bookmarks', b, old, new):
1096 1097 ui.status(bookmsgmap[action][0] % b)
1097 1098 else:
1098 1099 ui.warn(bookmsgmap[action][1] % b)
1099 1100 # discovery can have set the value from an invalid entry
1100 1101 if pushop.bkresult is not None:
1101 1102 pushop.bkresult = 1
1102 1103
1103 1104 class pulloperation(object):
1104 1105 """A object that represent a single pull operation
1105 1106
1106 1107 It purpose is to carry pull related state and very common operation.
1107 1108
1108 1109 A new should be created at the beginning of each pull and discarded
1109 1110 afterward.
1110 1111 """
1111 1112
1112 1113 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1113 1114 remotebookmarks=None, streamclonerequested=None):
1114 1115 # repo we pull into
1115 1116 self.repo = repo
1116 1117 # repo we pull from
1117 1118 self.remote = remote
1118 1119 # revision we try to pull (None is "all")
1119 1120 self.heads = heads
1120 1121 # bookmarks pulled explicitly
1121 1122 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1122 1123 for bookmark in bookmarks]
1123 1124 # do we force pull?
1124 1125 self.force = force
1125 1126 # whether a streaming clone was requested
1126 1127 self.streamclonerequested = streamclonerequested
1127 1128 # transaction manager
1128 1129 self.trmanager = None
1129 1130 # set of common changesets between local and remote before pull
1130 1131 self.common = None
1131 1132 # set of pulled heads
1132 1133 self.rheads = None
1133 1134 # list of missing changesets to fetch remotely
1134 1135 self.fetch = None
1135 1136 # remote bookmarks data
1136 1137 self.remotebookmarks = remotebookmarks
1137 1138 # result of changegroup pulling (used as return code by pull)
1138 1139 self.cgresult = None
1139 1140 # list of steps already done
1140 1141 self.stepsdone = set()
1141 1142 # Whether we attempted a clone from pre-generated bundles.
1142 1143 self.clonebundleattempted = False
1143 1144
1144 1145 @util.propertycache
1145 1146 def pulledsubset(self):
1146 1147 """heads of the set of changeset target by the pull"""
1147 1148 # compute target subset
1148 1149 if self.heads is None:
1149 1150 # We pulled everything possible
1150 1151 # sync on everything common
1151 1152 c = set(self.common)
1152 1153 ret = list(self.common)
1153 1154 for n in self.rheads:
1154 1155 if n not in c:
1155 1156 ret.append(n)
1156 1157 return ret
1157 1158 else:
1158 1159 # We pulled a specific subset
1159 1160 # sync on this subset
1160 1161 return self.heads
1161 1162
1162 1163 @util.propertycache
1163 1164 def canusebundle2(self):
1164 1165 return not _forcebundle1(self)
1165 1166
1166 1167 @util.propertycache
1167 1168 def remotebundle2caps(self):
1168 1169 return bundle2.bundle2caps(self.remote)
1169 1170
1170 1171 def gettransaction(self):
1171 1172 # deprecated; talk to trmanager directly
1172 1173 return self.trmanager.transaction()
1173 1174
1174 1175 class transactionmanager(object):
1175 1176 """An object to manage the life cycle of a transaction
1176 1177
1177 1178 It creates the transaction on demand and calls the appropriate hooks when
1178 1179 closing the transaction."""
1179 1180 def __init__(self, repo, source, url):
1180 1181 self.repo = repo
1181 1182 self.source = source
1182 1183 self.url = url
1183 1184 self._tr = None
1184 1185
1185 1186 def transaction(self):
1186 1187 """Return an open transaction object, constructing if necessary"""
1187 1188 if not self._tr:
1188 1189 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1189 1190 self._tr = self.repo.transaction(trname)
1190 1191 self._tr.hookargs['source'] = self.source
1191 1192 self._tr.hookargs['url'] = self.url
1192 1193 return self._tr
1193 1194
1194 1195 def close(self):
1195 1196 """close transaction if created"""
1196 1197 if self._tr is not None:
1197 1198 self._tr.close()
1198 1199
1199 1200 def release(self):
1200 1201 """release transaction if created"""
1201 1202 if self._tr is not None:
1202 1203 self._tr.release()
1203 1204
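# Typical transactionmanager lifecycle (mirrors its use in pull() below):
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   try:
#       tr = trmanager.transaction()  # created lazily, on first use
#       ...                           # apply pulled data inside it
#       trmanager.close()             # commit, if one was created
#   finally:
#       trmanager.release()           # abort if still open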
1204 1205 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1205 1206 streamclonerequested=None):
1206 1207 """Fetch repository data from a remote.
1207 1208
1208 1209 This is the main function used to retrieve data from a remote repository.
1209 1210
1210 1211 ``repo`` is the local repository to clone into.
1211 1212 ``remote`` is a peer instance.
1212 1213 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1213 1214 default) means to pull everything from the remote.
1214 1215 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1215 1216 default, all remote bookmarks are pulled.
1216 1217 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1217 1218 initialization.
1218 1219 ``streamclonerequested`` is a boolean indicating whether a "streaming
1219 1220 clone" is requested. A "streaming clone" is essentially a raw file copy
1220 1221 of revlogs from the server. This only works when the local repository is
1221 1222 empty. The default value of ``None`` means to respect the server
1222 1223 configuration for preferring stream clones.
1223 1224
1224 1225 Returns the ``pulloperation`` created for this pull.
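
Illustrative usage (hg.peer is the usual way to obtain a peer; src is a
hypothetical source path or URL):

from mercurial import hg
other = hg.peer(repo, {}, src)
pullop = pull(repo, other, heads=None)  # pull everything
# pullop.cgresult carries the changegroup application result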
1225 1226 """
1226 1227 if opargs is None:
1227 1228 opargs = {}
1228 1229 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1229 1230 streamclonerequested=streamclonerequested, **opargs)
1230 1231 if pullop.remote.local():
1231 1232 missing = set(pullop.remote.requirements) - pullop.repo.supported
1232 1233 if missing:
1233 1234 msg = _("required features are not"
1234 1235 " supported in the destination:"
1235 1236 " %s") % (', '.join(sorted(missing)))
1236 1237 raise error.Abort(msg)
1237 1238
1238 1239 wlock = lock = None
1239 1240 try:
1240 1241 wlock = pullop.repo.wlock()
1241 1242 lock = pullop.repo.lock()
1242 1243 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1243 1244 streamclone.maybeperformlegacystreamclone(pullop)
1244 1245 # This should ideally be in _pullbundle2(). However, it needs to run
1245 1246 # before discovery to avoid extra work.
1246 1247 _maybeapplyclonebundle(pullop)
1247 1248 _pulldiscovery(pullop)
1248 1249 if pullop.canusebundle2:
1249 1250 _pullbundle2(pullop)
1250 1251 _pullchangeset(pullop)
1251 1252 _pullphase(pullop)
1252 1253 _pullbookmarks(pullop)
1253 1254 _pullobsolete(pullop)
1254 1255 pullop.trmanager.close()
1255 1256 finally:
1256 1257 lockmod.release(pullop.trmanager, lock, wlock)
1257 1258
1258 1259 return pullop
1259 1260
1260 1261 # list of steps to perform discovery before pull
1261 1262 pulldiscoveryorder = []
1262 1263
1263 1264 # Mapping between step name and function
1264 1265 #
1265 1266 # This exists to help extensions wrap steps if necessary
1266 1267 pulldiscoverymapping = {}
1267 1268
1268 1269 def pulldiscovery(stepname):
1269 1270 """decorator for function performing discovery before pull
1270 1271
1271 1272 The function is added to the step -> function mapping and appended to the
1272 1273 list of steps. Beware that decorated functions will be added in order (this
1273 1274 may matter).
1274 1275
1275 1276 You can only use this decorator for a new step; if you want to wrap a step
1276 1277 from an extension, change the pulldiscoverymapping dictionary directly."""
1277 1278 def dec(func):
1278 1279 assert stepname not in pulldiscoverymapping
1279 1280 pulldiscoverymapping[stepname] = func
1280 1281 pulldiscoveryorder.append(stepname)
1281 1282 return func
1282 1283 return dec
1283 1284
1284 1285 def _pulldiscovery(pullop):
1285 1286 """Run all discovery steps"""
1286 1287 for stepname in pulldiscoveryorder:
1287 1288 step = pulldiscoverymapping[stepname]
1288 1289 step(pullop)
1289 1290
1290 1291 @pulldiscovery('b1:bookmarks')
1291 1292 def _pullbookmarkbundle1(pullop):
1292 1293 """fetch bookmark data in bundle1 case
1293 1294
1294 1295 If not using bundle2, we have to fetch bookmarks before changeset
1295 1296 discovery to reduce the chance and impact of race conditions."""
1296 1297 if pullop.remotebookmarks is not None:
1297 1298 return
1298 1299 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1299 1300 # all known bundle2 servers now support listkeys, but let's be nice with
1300 1301 # new implementations.
1301 1302 return
1302 1303 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1303 1304
1304 1305
1305 1306 @pulldiscovery('changegroup')
1306 1307 def _pulldiscoverychangegroup(pullop):
1307 1308 """discovery phase for the pull
1308 1309
1309 1310 Currently handles changeset discovery only; will change to handle all
1310 1311 discovery at some point."""
1311 1312 tmp = discovery.findcommonincoming(pullop.repo,
1312 1313 pullop.remote,
1313 1314 heads=pullop.heads,
1314 1315 force=pullop.force)
1315 1316 common, fetch, rheads = tmp
1316 1317 nm = pullop.repo.unfiltered().changelog.nodemap
1317 1318 if fetch and rheads:
1318 1319 # If a remote head is filtered locally, let's drop it from the unknown
1319 1320 # remote heads and put it back in common.
1320 1321 #
1321 1322 # This is a hackish solution to catch most of the "common but locally
1322 1323 # hidden" situations. We do not perform discovery on the unfiltered
1323 1324 # repository because it ends up doing a pathological amount of round
1324 1325 # trips for a huge amount of changesets we do not care about.
1325 1326 #
1326 1327 # If a set of such "common but filtered" changesets exists on the server
1327 1328 # but does not include a remote head, we'll not be able to detect it,
1328 1329 scommon = set(common)
1329 1330 filteredrheads = []
1330 1331 for n in rheads:
1331 1332 if n in nm:
1332 1333 if n not in scommon:
1333 1334 common.append(n)
1334 1335 else:
1335 1336 filteredrheads.append(n)
1336 1337 if not filteredrheads:
1337 1338 fetch = []
1338 1339 rheads = filteredrheads
1339 1340 pullop.common = common
1340 1341 pullop.fetch = fetch
1341 1342 pullop.rheads = rheads
1342 1343
1343 1344 def _pullbundle2(pullop):
1344 1345 """pull data using bundle2
1345 1346
1346 1347 For now, the only supported data is the changegroup."""
1347 1348 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1348 1349
1349 1350 # At the moment we don't do stream clones over bundle2. If that is
1350 1351 # implemented then here's where the check for that will go.
1351 1352 streaming = False
1352 1353
1353 1354 # pulling changegroup
1354 1355 pullop.stepsdone.add('changegroup')
1355 1356
1356 1357 kwargs['common'] = pullop.common
1357 1358 kwargs['heads'] = pullop.heads or pullop.rheads
1358 1359 kwargs['cg'] = pullop.fetch
1359 1360 if 'listkeys' in pullop.remotebundle2caps:
1360 1361 kwargs['listkeys'] = ['phases']
1361 1362 if pullop.remotebookmarks is None:
1362 1363 # make sure to always include bookmark data when migrating
1363 1364 # `hg incoming --bundle` to using this function.
1364 1365 kwargs['listkeys'].append('bookmarks')
1365 1366
1366 1367 # If this is a full pull / clone and the server supports the clone bundles
1367 1368 # feature, tell the server whether we attempted a clone bundle. The
1368 1369 # presence of this flag indicates the client supports clone bundles. This
1369 1370 # will enable the server to treat clients that support clone bundles
1370 1371 # differently from those that don't.
1371 1372 if (pullop.remote.capable('clonebundles')
1372 1373 and pullop.heads is None and list(pullop.common) == [nullid]):
1373 1374 kwargs['cbattempted'] = pullop.clonebundleattempted
1374 1375
1375 1376 if streaming:
1376 1377 pullop.repo.ui.status(_('streaming all changes\n'))
1377 1378 elif not pullop.fetch:
1378 1379 pullop.repo.ui.status(_("no changes found\n"))
1379 1380 pullop.cgresult = 0
1380 1381 else:
1381 1382 if pullop.heads is None and list(pullop.common) == [nullid]:
1382 1383 pullop.repo.ui.status(_("requesting all changes\n"))
1383 1384 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1384 1385 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1385 1386 if obsolete.commonversion(remoteversions) is not None:
1386 1387 kwargs['obsmarkers'] = True
1387 1388 pullop.stepsdone.add('obsmarkers')
1388 1389 _pullbundle2extraprepare(pullop, kwargs)
1389 bundle = pullop.remote.getbundle('pull', **kwargs)
1390 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1390 1391 try:
1391 1392 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1392 1393 except bundle2.AbortFromPart as exc:
1393 1394 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1394 1395 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1395 1396 except error.BundleValueError as exc:
1396 1397 raise error.Abort(_('missing support for %s') % exc)
1397 1398
1398 1399 if pullop.fetch:
1399 1400 results = [cg['return'] for cg in op.records['changegroup']]
1400 1401 pullop.cgresult = changegroup.combineresults(results)
1401 1402
1402 1403 # processing phases change
1403 1404 for namespace, value in op.records['listkeys']:
1404 1405 if namespace == 'phases':
1405 1406 _pullapplyphases(pullop, value)
1406 1407
1407 1408 # processing bookmark update
1408 1409 for namespace, value in op.records['listkeys']:
1409 1410 if namespace == 'bookmarks':
1410 1411 pullop.remotebookmarks = value
1411 1412
1412 1413 # bookmark data were either already there or pulled in the bundle
1413 1414 if pullop.remotebookmarks is not None:
1414 1415 _pullbookmarks(pullop)
1415 1416
1416 1417 def _pullbundle2extraprepare(pullop, kwargs):
1417 1418 """hook function so that extensions can extend the getbundle call"""
1418 1419 pass
1419 1420
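# Illustrative sketch of how an extension might use this hook (hypothetical
# extension code; wrapfunction comes from mercurial.extensions):
#
#   def _myprepare(orig, pullop, kwargs):
#       kwargs['myextraarg'] = True  # hypothetical extra getbundle argument
#       return orig(pullop, kwargs)
#   extensions.wrapfunction(exchange, '_pullbundle2extraprepare', _myprepare)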
1420 1421 def _pullchangeset(pullop):
1421 1422 """pull changeset from unbundle into the local repo"""
1423 1424 # We delay opening the transaction as late as possible so we
1424 1425 # don't open a transaction for nothing, and don't break a future
1425 1426 # useful rollback call
1425 1426 if 'changegroup' in pullop.stepsdone:
1426 1427 return
1427 1428 pullop.stepsdone.add('changegroup')
1428 1429 if not pullop.fetch:
1429 1430 pullop.repo.ui.status(_("no changes found\n"))
1430 1431 pullop.cgresult = 0
1431 1432 return
1432 1433 pullop.gettransaction()
1433 1434 if pullop.heads is None and list(pullop.common) == [nullid]:
1434 1435 pullop.repo.ui.status(_("requesting all changes\n"))
1435 1436 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1436 1437 # issue1320, avoid a race if remote changed after discovery
1437 1438 pullop.heads = pullop.rheads
1438 1439
1439 1440 if pullop.remote.capable('getbundle'):
1440 1441 # TODO: get bundlecaps from remote
1441 1442 cg = pullop.remote.getbundle('pull', common=pullop.common,
1442 1443 heads=pullop.heads or pullop.rheads)
1443 1444 elif pullop.heads is None:
1444 1445 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1445 1446 elif not pullop.remote.capable('changegroupsubset'):
1446 1447 raise error.Abort(_("partial pull cannot be done because "
1447 1448 "other repository doesn't support "
1448 1449 "changegroupsubset."))
1449 1450 else:
1450 1451 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1451 1452 pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1452 1453
1453 1454 def _pullphase(pullop):
1454 1455 # Get remote phases data from remote
1455 1456 if 'phases' in pullop.stepsdone:
1456 1457 return
1457 1458 remotephases = pullop.remote.listkeys('phases')
1458 1459 _pullapplyphases(pullop, remotephases)
1459 1460
1460 1461 def _pullapplyphases(pullop, remotephases):
1461 1462 """apply phase movement from observed remote state"""
1462 1463 if 'phases' in pullop.stepsdone:
1463 1464 return
1464 1465 pullop.stepsdone.add('phases')
1465 1466 publishing = bool(remotephases.get('publishing', False))
1466 1467 if remotephases and not publishing:
1467 1468 # remote is new and non-publishing
1468 1469 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1469 1470 pullop.pulledsubset,
1470 1471 remotephases)
1471 1472 dheads = pullop.pulledsubset
1472 1473 else:
1473 1474 # Remote is old or publishing; all common changesets
1474 1475 # should be seen as public
1475 1476 pheads = pullop.pulledsubset
1476 1477 dheads = []
1477 1478 unfi = pullop.repo.unfiltered()
1478 1479 phase = unfi._phasecache.phase
1479 1480 rev = unfi.changelog.nodemap.get
1480 1481 public = phases.public
1481 1482 draft = phases.draft
1482 1483
1483 1484 # exclude changesets already public locally and update the others
1484 1485 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1485 1486 if pheads:
1486 1487 tr = pullop.gettransaction()
1487 1488 phases.advanceboundary(pullop.repo, tr, public, pheads)
1488 1489
1489 1490 # exclude changesets already draft locally and update the others
1490 1491 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1491 1492 if dheads:
1492 1493 tr = pullop.gettransaction()
1493 1494 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1494 1495
1495 1496 def _pullbookmarks(pullop):
1496 1497 """process the remote bookmark information to update the local one"""
1497 1498 if 'bookmarks' in pullop.stepsdone:
1498 1499 return
1499 1500 pullop.stepsdone.add('bookmarks')
1500 1501 repo = pullop.repo
1501 1502 remotebookmarks = pullop.remotebookmarks
1502 1503 remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
1503 1504 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1504 1505 pullop.remote.url(),
1505 1506 pullop.gettransaction,
1506 1507 explicit=pullop.explicitbookmarks)
1507 1508
1508 1509 def _pullobsolete(pullop):
1509 1510 """utility function to pull obsolete markers from a remote
1510 1511
1511 1512 `gettransaction` is a function that returns the pull transaction, creating
1512 1513 one if necessary. We return the transaction to inform the calling code that
1513 1514 a new transaction may have been created (when applicable).
1514 1515
1515 1516 Exists mostly to allow overriding for experimentation purposes"""
1516 1517 if 'obsmarkers' in pullop.stepsdone:
1517 1518 return
1518 1519 pullop.stepsdone.add('obsmarkers')
1519 1520 tr = None
1520 1521 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1521 1522 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1522 1523 remoteobs = pullop.remote.listkeys('obsolete')
1523 1524 if 'dump0' in remoteobs:
1524 1525 tr = pullop.gettransaction()
1525 1526 markers = []
1526 1527 for key in sorted(remoteobs, reverse=True):
1527 1528 if key.startswith('dump'):
1528 1529 data = util.b85decode(remoteobs[key])
1529 1530 version, newmarks = obsolete._readmarkers(data)
1530 1531 markers += newmarks
1531 1532 if markers:
1532 1533 pullop.repo.obsstore.add(tr, markers)
1533 1534 pullop.repo.invalidatevolatilesets()
1534 1535 return tr
1535 1536
1536 1537 def caps20to10(repo):
1537 1538 """return a set with appropriate options to use bundle20 during getbundle"""
1538 1539 caps = {'HG20'}
1539 1540 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1540 1541 caps.add('bundle2=' + urlreq.quote(capsblob))
1541 1542 return caps
1542 1543
1543 1544 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1544 1545 getbundle2partsorder = []
1545 1546
1546 1547 # Mapping between step name and function
1547 1548 #
1548 1549 # This exists to help extensions wrap steps if necessary
1549 1550 getbundle2partsmapping = {}
1550 1551
1551 1552 def getbundle2partsgenerator(stepname, idx=None):
1552 1553 """decorator for function generating bundle2 part for getbundle
1553 1554
1554 1555 The function is added to the step -> function mapping and appended to the
1555 1556 list of steps. Beware that decorated functions will be added in order
1556 1557 (this may matter).
1557 1558
1558 1559 You can only use this decorator for new steps; if you want to wrap a step
1559 1560 from an extension, modify the getbundle2partsmapping dictionary directly."""
1560 1561 def dec(func):
1561 1562 assert stepname not in getbundle2partsmapping
1562 1563 getbundle2partsmapping[stepname] = func
1563 1564 if idx is None:
1564 1565 getbundle2partsorder.append(stepname)
1565 1566 else:
1566 1567 getbundle2partsorder.insert(idx, stepname)
1567 1568 return func
1568 1569 return dec
1569 1570
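As context for the decorator above, a hypothetical extension registering a new getbundle part could look like this (the step/part name 'x-myext' and the 'x_myext' argument are invented for illustration):

    from mercurial import exchange

    @exchange.getbundle2partsgenerator('x-myext')
    def _getbundlemyextpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
        """add a custom part when the client asked for it"""
        if kwargs.get('x_myext', False):
            bundler.newpart('x-myext', data='payload')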
1570 1571 def bundle2requested(bundlecaps):
1571 1572 if bundlecaps is not None:
1572 1573 return any(cap.startswith('HG2') for cap in bundlecaps)
1573 1574 return False
1574 1575
1575 1576 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1576 1577 **kwargs):
1577 1578 """Return chunks constituting a bundle's raw data.
1578 1579
1579 1580 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1580 1581 passed.
1581 1582
1582 1583 Returns an iterator over raw chunks (of varying sizes).
1583 1584 """
1584 1585 usebundle2 = bundle2requested(bundlecaps)
1585 1586 # bundle10 case
1586 1587 if not usebundle2:
1587 1588 if bundlecaps and not kwargs.get('cg', True):
1588 1589 raise ValueError(_('request for bundle10 must include changegroup'))
1589 1590
1590 1591 if kwargs:
1591 1592 raise ValueError(_('unsupported getbundle arguments: %s')
1592 1593 % ', '.join(sorted(kwargs.keys())))
1593 1594 outgoing = _computeoutgoing(repo, heads, common)
1594 1595 bundler = changegroup.getbundler('01', repo, bundlecaps)
1595 1596 return changegroup.getsubsetraw(repo, outgoing, bundler, source)
1596 1597
1597 1598 # bundle20 case
1598 1599 b2caps = {}
1599 1600 for bcaps in bundlecaps:
1600 1601 if bcaps.startswith('bundle2='):
1601 1602 blob = urlreq.unquote(bcaps[len('bundle2='):])
1602 1603 b2caps.update(bundle2.decodecaps(blob))
1603 1604 bundler = bundle2.bundle20(repo.ui, b2caps)
1604 1605
1605 1606 kwargs['heads'] = heads
1606 1607 kwargs['common'] = common
1607 1608
1608 1609 for name in getbundle2partsorder:
1609 1610 func = getbundle2partsmapping[name]
1610 1611 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1611 1612 **kwargs)
1612 1613
1613 1614 return bundler.getchunks()
1614 1615
1615 1616 @getbundle2partsgenerator('changegroup')
1616 1617 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1617 1618 b2caps=None, heads=None, common=None, **kwargs):
1618 1619 """add a changegroup part to the requested bundle"""
1619 1620 cg = None
1620 1621 if kwargs.get('cg', True):
1621 1622 # build changegroup bundle here.
1622 1623 version = '01'
1623 1624 cgversions = b2caps.get('changegroup')
1624 1625 if cgversions: # 3.1 and 3.2 ship with an empty value
1625 1626 cgversions = [v for v in cgversions
1626 1627 if v in changegroup.supportedoutgoingversions(repo)]
1627 1628 if not cgversions:
1628 1629 raise ValueError(_('no common changegroup version'))
1629 1630 version = max(cgversions)
1630 1631 outgoing = _computeoutgoing(repo, heads, common)
1631 1632 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1632 1633 bundlecaps=bundlecaps,
1633 1634 version=version)
1634 1635
1635 1636 if cg:
1636 1637 part = bundler.newpart('changegroup', data=cg)
1637 1638 if cgversions:
1638 1639 part.addparam('version', version)
1639 1640 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1640 1641 if 'treemanifest' in repo.requirements:
1641 1642 part.addparam('treemanifest', '1')
1642 1643
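The version negotiation above picks the newest changegroup version both sides understand; for example (values invented):

    clientversions = ['01', '02']        # from b2caps.get('changegroup')
    serverversions = {'01', '02', '03'}  # supportedoutgoingversions(repo)
    common = [v for v in clientversions if v in serverversions]
    assert max(common) == '02'           # '03' is unknown to the client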
1643 1644 @getbundle2partsgenerator('listkeys')
1644 1645 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1645 1646 b2caps=None, **kwargs):
1646 1647 """add parts containing listkeys namespaces to the requested bundle"""
1647 1648 listkeys = kwargs.get('listkeys', ())
1648 1649 for namespace in listkeys:
1649 1650 part = bundler.newpart('listkeys')
1650 1651 part.addparam('namespace', namespace)
1651 1652 keys = repo.listkeys(namespace).items()
1652 1653 part.data = pushkey.encodekeys(keys)
1653 1654
1654 1655 @getbundle2partsgenerator('obsmarkers')
1655 1656 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1656 1657 b2caps=None, heads=None, **kwargs):
1657 1658 """add an obsolescence markers part to the requested bundle"""
1658 1659 if kwargs.get('obsmarkers', False):
1659 1660 if heads is None:
1660 1661 heads = repo.heads()
1661 1662 subset = [c.node() for c in repo.set('::%ln', heads)]
1662 1663 markers = repo.obsstore.relevantmarkers(subset)
1663 1664 markers = sorted(markers)
1664 1665 bundle2.buildobsmarkerspart(bundler, markers)
1665 1666
1666 1667 @getbundle2partsgenerator('hgtagsfnodes')
1667 1668 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1668 1669 b2caps=None, heads=None, common=None,
1669 1670 **kwargs):
1670 1671 """Transfer the .hgtags filenodes mapping.
1671 1672
1672 1673 Only values for heads in this bundle will be transferred.
1673 1674
1674 1675 The part data consists of pairs of 20 byte changeset node and .hgtags
1675 1676 filenodes raw values.
1676 1677 """
1677 1678 # Don't send unless:
1678 1679 # - changesets are being exchanged,
1679 1680 # - the client supports it.
1680 1681 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1681 1682 return
1682 1683
1683 1684 outgoing = _computeoutgoing(repo, heads, common)
1684 1685 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1685 1686
1686 1687 def _getbookmarks(repo, **kwargs):
1687 1688 """Returns bookmark to node mapping.
1688 1689
1689 1690 This function is primarily used to generate `bookmarks` bundle2 part.
1690 1691 It is a separate function in order to make it easy to wrap it
1691 1692 in extensions. Passing `kwargs` to the function makes it easy to
1692 1693 add new parameters in extensions.
1693 1694 """
1694 1695
1695 1696 return dict(bookmod.listbinbookmarks(repo))
1696 1697
1697 1698 def check_heads(repo, their_heads, context):
1698 1699 """check if the heads of a repo have been modified
1699 1700
1700 1701 Used by peer for unbundling.
1701 1702 """
1702 1703 heads = repo.heads()
1703 1704 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1704 1705 if not (their_heads == ['force'] or their_heads == heads or
1705 1706 their_heads == ['hashed', heads_hash]):
1706 1707 # someone else committed/pushed/unbundled while we
1707 1708 # were transferring data
1708 1709 raise error.PushRaced('repository changed while %s - '
1709 1710 'please try again' % context)
1710 1711
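The 'hashed' form accepted by check_heads() lets a client send a fixed-size digest instead of the full head list. A sketch of how such a value would be built (the helper name is invented; bytes literals are used so the snippet runs standalone on Python 3):

    import hashlib

    def hashedheads(heads):
        # heads: 20-byte binary changeset nodes seen during discovery; the
        # server recomputes the same digest over its current heads.
        return [b'hashed', hashlib.sha1(b''.join(sorted(heads))).digest()]

    print(hashedheads([b'\x00' * 20]))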
1711 1712 def unbundle(repo, cg, heads, source, url):
1712 1713 """Apply a bundle to a repo.
1713 1714
1714 1715 This function makes sure the repo is locked during the application and has
1715 1716 a mechanism to check that no push race occurred between the creation of the
1716 1717 bundle and its application.
1717 1718
1718 1719 If the push was raced, a PushRaced exception is raised."""
1719 1720 r = 0
1720 1721 # need a transaction when processing a bundle2 stream
1721 1722 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1722 1723 lockandtr = [None, None, None]
1723 1724 recordout = None
1724 1725 # quick fix for output mismatch with bundle2 in 3.4
1725 1726 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1726 1727 False)
1727 1728 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1728 1729 captureoutput = True
1729 1730 try:
1730 1731 # note: outside bundle1, 'heads' is expected to be empty and this
1731 1732 # 'check_heads' call will be a no-op
1732 1733 check_heads(repo, heads, 'uploading changes')
1733 1734 # push can proceed
1734 1735 if not isinstance(cg, bundle2.unbundle20):
1735 1736 # legacy case: bundle1 (changegroup 01)
1736 1737 with repo.lock():
1737 1738 r = cg.apply(repo, source, url)
1738 1739 else:
1739 1740 r = None
1740 1741 try:
1741 1742 def gettransaction():
1742 1743 if not lockandtr[2]:
1743 1744 lockandtr[0] = repo.wlock()
1744 1745 lockandtr[1] = repo.lock()
1745 1746 lockandtr[2] = repo.transaction(source)
1746 1747 lockandtr[2].hookargs['source'] = source
1747 1748 lockandtr[2].hookargs['url'] = url
1748 1749 lockandtr[2].hookargs['bundle2'] = '1'
1749 1750 return lockandtr[2]
1750 1751
1751 1752 # Do greedy locking by default until we're satisfied with lazy
1752 1753 # locking.
1753 1754 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1754 1755 gettransaction()
1755 1756
1756 1757 op = bundle2.bundleoperation(repo, gettransaction,
1757 1758 captureoutput=captureoutput)
1758 1759 try:
1759 1760 op = bundle2.processbundle(repo, cg, op=op)
1760 1761 finally:
1761 1762 r = op.reply
1762 1763 if captureoutput and r is not None:
1763 1764 repo.ui.pushbuffer(error=True, subproc=True)
1764 1765 def recordout(output):
1765 1766 r.newpart('output', data=output, mandatory=False)
1766 1767 if lockandtr[2] is not None:
1767 1768 lockandtr[2].close()
1768 1769 except BaseException as exc:
1769 1770 exc.duringunbundle2 = True
1770 1771 if captureoutput and r is not None:
1771 1772 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1772 1773 def recordout(output):
1773 1774 part = bundle2.bundlepart('output', data=output,
1774 1775 mandatory=False)
1775 1776 parts.append(part)
1776 1777 raise
1777 1778 finally:
1778 1779 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1779 1780 if recordout is not None:
1780 1781 recordout(repo.ui.popbuffer())
1781 1782 return r
1782 1783
1783 1784 def _maybeapplyclonebundle(pullop):
1784 1785 """Apply a clone bundle from a remote, if possible."""
1785 1786
1786 1787 repo = pullop.repo
1787 1788 remote = pullop.remote
1788 1789
1789 1790 if not repo.ui.configbool('ui', 'clonebundles', True):
1790 1791 return
1791 1792
1792 1793 # Only run if local repo is empty.
1793 1794 if len(repo):
1794 1795 return
1795 1796
1796 1797 if pullop.heads:
1797 1798 return
1798 1799
1799 1800 if not remote.capable('clonebundles'):
1800 1801 return
1801 1802
1802 1803 res = remote._call('clonebundles')
1803 1804
1804 1805 # If we call the wire protocol command, that's good enough to record the
1805 1806 # attempt.
1806 1807 pullop.clonebundleattempted = True
1807 1808
1808 1809 entries = parseclonebundlesmanifest(repo, res)
1809 1810 if not entries:
1810 1811 repo.ui.note(_('no clone bundles available on remote; '
1811 1812 'falling back to regular clone\n'))
1812 1813 return
1813 1814
1814 1815 entries = filterclonebundleentries(repo, entries)
1815 1816 if not entries:
1816 1817 # There is a thundering herd concern here. However, if a server
1817 1818 # operator doesn't advertise bundles appropriate for its clients,
1818 1819 # they deserve what's coming. Furthermore, from a client's
1819 1820 # perspective, no automatic fallback would mean not being able to
1820 1821 # clone!
1821 1822 repo.ui.warn(_('no compatible clone bundles available on server; '
1822 1823 'falling back to regular clone\n'))
1823 1824 repo.ui.warn(_('(you may want to report this to the server '
1824 1825 'operator)\n'))
1825 1826 return
1826 1827
1827 1828 entries = sortclonebundleentries(repo.ui, entries)
1828 1829
1829 1830 url = entries[0]['URL']
1830 1831 repo.ui.status(_('applying clone bundle from %s\n') % url)
1831 1832 if trypullbundlefromurl(repo.ui, repo, url):
1832 1833 repo.ui.status(_('finished applying clone bundle\n'))
1833 1834 # Bundle failed.
1834 1835 #
1835 1836 # We abort by default to avoid the thundering herd of
1836 1837 # clients flooding a server that was expecting expensive
1837 1838 # clone load to be offloaded.
1838 1839 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1839 1840 repo.ui.warn(_('falling back to normal clone\n'))
1840 1841 else:
1841 1842 raise error.Abort(_('error applying bundle'),
1842 1843 hint=_('if this error persists, consider contacting '
1843 1844 'the server operator or disable clone '
1844 1845 'bundles via '
1845 1846 '"--config ui.clonebundles=false"'))
1846 1847
1847 1848 def parseclonebundlesmanifest(repo, s):
1848 1849 """Parses the raw text of a clone bundles manifest.
1849 1850
1850 1851 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1851 1852 to the URL; the other keys are the attributes for the entry.
1852 1853 """
1853 1854 m = []
1854 1855 for line in s.splitlines():
1855 1856 fields = line.split()
1856 1857 if not fields:
1857 1858 continue
1858 1859 attrs = {'URL': fields[0]}
1859 1860 for rawattr in fields[1:]:
1860 1861 key, value = rawattr.split('=', 1)
1861 1862 key = urlreq.unquote(key)
1862 1863 value = urlreq.unquote(value)
1863 1864 attrs[key] = value
1864 1865
1865 1866 # Parse BUNDLESPEC into components. This makes client-side
1866 1867 # preferences easier to specify since you can prefer a single
1867 1868 # component of the BUNDLESPEC.
1868 1869 if key == 'BUNDLESPEC':
1869 1870 try:
1870 1871 comp, version, params = parsebundlespec(repo, value,
1871 1872 externalnames=True)
1872 1873 attrs['COMPRESSION'] = comp
1873 1874 attrs['VERSION'] = version
1874 1875 except error.InvalidBundleSpecification:
1875 1876 pass
1876 1877 except error.UnsupportedBundleSpecification:
1877 1878 pass
1878 1879
1879 1880 m.append(attrs)
1880 1881
1881 1882 return m
1882 1883
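As a worked example (URLs and values invented), a manifest such as:

    https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
    https://example.com/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1

would parse into entries along the lines of:

    {'URL': 'https://example.com/full.hg',
     'BUNDLESPEC': 'gzip-v2',
     'COMPRESSION': 'gzip',  # split out of BUNDLESPEC for preferences
     'VERSION': 'v2',
     'REQUIRESNI': 'true'}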
1883 1884 def filterclonebundleentries(repo, entries):
1884 1885 """Remove incompatible clone bundle manifest entries.
1885 1886
1886 1887 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1887 1888 and returns a new list consisting of only the entries that this client
1888 1889 should be able to apply.
1889 1890
1890 1891 There is no guarantee we'll be able to apply all returned entries because
1891 1892 the metadata we use to filter on may be missing or wrong.
1892 1893 """
1893 1894 newentries = []
1894 1895 for entry in entries:
1895 1896 spec = entry.get('BUNDLESPEC')
1896 1897 if spec:
1897 1898 try:
1898 1899 parsebundlespec(repo, spec, strict=True)
1899 1900 except error.InvalidBundleSpecification as e:
1900 1901 repo.ui.debug(str(e) + '\n')
1901 1902 continue
1902 1903 except error.UnsupportedBundleSpecification as e:
1903 1904 repo.ui.debug('filtering %s because unsupported bundle '
1904 1905 'spec: %s\n' % (entry['URL'], str(e)))
1905 1906 continue
1906 1907
1907 1908 if 'REQUIRESNI' in entry and not sslutil.hassni:
1908 1909 repo.ui.debug('filtering %s because SNI not supported\n' %
1909 1910 entry['URL'])
1910 1911 continue
1911 1912
1912 1913 newentries.append(entry)
1913 1914
1914 1915 return newentries
1915 1916
1916 1917 class clonebundleentry(object):
1917 1918 """Represents an item in a clone bundles manifest.
1918 1919
1919 1920 This rich class is needed to support sorting since sorted() in Python 3
1920 1921 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
1921 1922 won't work.
1922 1923 """
1923 1924
1924 1925 def __init__(self, value, prefers):
1925 1926 self.value = value
1926 1927 self.prefers = prefers
1927 1928
1928 1929 def _cmp(self, other):
1929 1930 for prefkey, prefvalue in self.prefers:
1930 1931 avalue = self.value.get(prefkey)
1931 1932 bvalue = other.value.get(prefkey)
1932 1933
1933 1934 # Special case: b is missing the attribute and a matches exactly.
1934 1935 if avalue is not None and bvalue is None and avalue == prefvalue:
1935 1936 return -1
1936 1937
1937 1938 # Special case: a is missing the attribute and b matches exactly.
1938 1939 if bvalue is not None and avalue is None and bvalue == prefvalue:
1939 1940 return 1
1940 1941
1941 1942 # We can't compare unless attribute present on both.
1942 1943 if avalue is None or bvalue is None:
1943 1944 continue
1944 1945
1945 1946 # Same values should fall back to next attribute.
1946 1947 if avalue == bvalue:
1947 1948 continue
1948 1949
1949 1950 # Exact matches come first.
1950 1951 if avalue == prefvalue:
1951 1952 return -1
1952 1953 if bvalue == prefvalue:
1953 1954 return 1
1954 1955
1955 1956 # Fall back to next attribute.
1956 1957 continue
1957 1958
1958 1959 # If we got here we couldn't sort by attributes and prefers. Fall
1959 1960 # back to index order.
1960 1961 return 0
1961 1962
1962 1963 def __lt__(self, other):
1963 1964 return self._cmp(other) < 0
1964 1965
1965 1966 def __gt__(self, other):
1966 1967 return self._cmp(other) > 0
1967 1968
1968 1969 def __eq__(self, other):
1969 1970 return self._cmp(other) == 0
1970 1971
1971 1972 def __le__(self, other):
1972 1973 return self._cmp(other) <= 0
1973 1974
1974 1975 def __ge__(self, other):
1975 1976 return self._cmp(other) >= 0
1976 1977
1977 1978 def __ne__(self, other):
1978 1979 return self._cmp(other) != 0
1979 1980
1980 1981 def sortclonebundleentries(ui, entries):
1981 1982 prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
1982 1983 if not prefers:
1983 1984 return list(entries)
1984 1985
1985 1986 prefers = [p.split('=', 1) for p in prefers]
1986 1987
1987 1988 items = sorted(clonebundleentry(v, prefers) for v in entries)
1988 1989 return [i.value for i in items]
1989 1990
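A small illustration of the preference sort (entries and prefers invented, assuming clonebundleentry from this module is in scope); the first preference dominates even when a later one would favor the other entry:

    entries = [{'URL': 'a', 'COMPRESSION': 'bzip2', 'VERSION': 'v2'},
               {'URL': 'b', 'COMPRESSION': 'gzip', 'VERSION': 'v1'}]
    prefers = [['COMPRESSION', 'gzip'], ['VERSION', 'v2']]
    items = sorted(clonebundleentry(v, prefers) for v in entries)
    assert [i.value['URL'] for i in items] == ['b', 'a']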
1990 1991 def trypullbundlefromurl(ui, repo, url):
1991 1992 """Attempt to apply a bundle from a URL."""
1992 1993 with repo.lock(), repo.transaction('bundleurl') as tr:
1993 1994 try:
1994 1995 fh = urlmod.open(ui, url)
1995 1996 cg = readbundle(ui, fh, 'stream')
1996 1997
1997 1998 if isinstance(cg, bundle2.unbundle20):
1998 1999 bundle2.processbundle(repo, cg, lambda: tr)
1999 2000 elif isinstance(cg, streamclone.streamcloneapplier):
2000 2001 cg.apply(repo)
2001 2002 else:
2002 2003 cg.apply(repo, 'clonebundles', url)
2003 2004 return True
2004 2005 except urlerr.httperror as e:
2005 2006 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2006 2007 except urlerr.urlerror as e:
2007 2008 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2008 2009
2009 2010 return False
@@ -1,490 +1,491
1 1 # formatter.py - generic output formatting for mercurial
2 2 #
3 3 # Copyright 2012 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Generic output formatting for Mercurial
9 9
10 10 The formatter provides an API to show data in various ways. The following
11 11 functions should be used in place of ui.write():
12 12
13 13 - fm.write() for unconditional output
14 14 - fm.condwrite() to show some extra data conditionally in plain output
15 15 - fm.context() to provide changectx to template output
16 16 - fm.data() to provide extra data to JSON or template output
17 17 - fm.plain() to show raw text that isn't provided to JSON or template output
18 18
19 19 To show structured data (e.g. date tuples, dicts, lists), apply fm.format*()
20 20 beforehand so the data is converted to the appropriate data type. Use
21 21 fm.isplain() if you need to convert or format data conditionally, which isn't
22 22 supported by the formatter API.
23 23
24 24 To build nested structure (i.e. a list of dicts), use fm.nested().
25 25
26 26 See also https://www.mercurial-scm.org/wiki/GenericTemplatingPlan
27 27
28 28 fm.condwrite() vs 'if cond:':
29 29
30 30 In most cases, use fm.condwrite() so users can selectively show the data
31 31 in template output. If it's costly to build data, use plain 'if cond:' with
32 32 fm.write().
33 33
34 34 fm.nested() vs fm.formatdict() (or fm.formatlist()):
35 35
36 36 fm.nested() should be used to form a tree structure (a list of dicts of
37 37 lists of dicts...) which can be accessed through template keywords, e.g.
38 38 "{foo % "{bar % {...}} {baz % {...}}"}". On the other hand, fm.formatdict()
39 39 exports a dict-type object to template, which can be accessed by e.g.
40 40 "{get(foo, key)}" function.
41 41
42 42 Doctest helper:
43 43
44 44 >>> def show(fn, verbose=False, **opts):
45 45 ... import sys
46 46 ... from . import ui as uimod
47 47 ... ui = uimod.ui()
48 48 ... ui.fout = sys.stdout # redirect to doctest
49 49 ... ui.verbose = verbose
50 50 ... return fn(ui, ui.formatter(fn.__name__, opts))
51 51
52 52 Basic example:
53 53
54 54 >>> def files(ui, fm):
55 55 ... files = [('foo', 123, (0, 0)), ('bar', 456, (1, 0))]
56 56 ... for f in files:
57 57 ... fm.startitem()
58 58 ... fm.write('path', '%s', f[0])
59 59 ... fm.condwrite(ui.verbose, 'date', ' %s',
60 60 ... fm.formatdate(f[2], '%Y-%m-%d %H:%M:%S'))
61 61 ... fm.data(size=f[1])
62 62 ... fm.plain('\\n')
63 63 ... fm.end()
64 64 >>> show(files)
65 65 foo
66 66 bar
67 67 >>> show(files, verbose=True)
68 68 foo 1970-01-01 00:00:00
69 69 bar 1970-01-01 00:00:01
70 70 >>> show(files, template='json')
71 71 [
72 72 {
73 73 "date": [0, 0],
74 74 "path": "foo",
75 75 "size": 123
76 76 },
77 77 {
78 78 "date": [1, 0],
79 79 "path": "bar",
80 80 "size": 456
81 81 }
82 82 ]
83 83 >>> show(files, template='path: {path}\\ndate: {date|rfc3339date}\\n')
84 84 path: foo
85 85 date: 1970-01-01T00:00:00+00:00
86 86 path: bar
87 87 date: 1970-01-01T00:00:01+00:00
88 88
89 89 Nested example:
90 90
91 91 >>> def subrepos(ui, fm):
92 92 ... fm.startitem()
93 93 ... fm.write('repo', '[%s]\\n', 'baz')
94 94 ... files(ui, fm.nested('files'))
95 95 ... fm.end()
96 96 >>> show(subrepos)
97 97 [baz]
98 98 foo
99 99 bar
100 100 >>> show(subrepos, template='{repo}: {join(files % "{path}", ", ")}\\n')
101 101 baz: foo, bar
102 102 """
103 103
104 104 from __future__ import absolute_import
105 105
106 106 import collections
107 107 import contextlib
108 108 import itertools
109 109 import os
110 110
111 111 from .i18n import _
112 112 from .node import (
113 113 hex,
114 114 short,
115 115 )
116 116
117 117 from . import (
118 118 error,
119 119 pycompat,
120 120 templatefilters,
121 121 templatekw,
122 122 templater,
123 123 util,
124 124 )
125 125
126 126 pickle = util.pickle
127 127
128 128 class _nullconverter(object):
129 129 '''convert non-primitive data types to be processed by formatter'''
130 130 @staticmethod
131 131 def formatdate(date, fmt):
132 132 '''convert date tuple to appropriate format'''
133 133 return date
134 134 @staticmethod
135 135 def formatdict(data, key, value, fmt, sep):
136 136 '''convert dict or key-value pairs to appropriate dict format'''
137 137 # use plain dict instead of util.sortdict so that data can be
138 138 # serialized as a builtin dict in pickle output
139 139 return dict(data)
140 140 @staticmethod
141 141 def formatlist(data, name, fmt, sep):
142 142 '''convert iterable to appropriate list format'''
143 143 return list(data)
144 144
145 145 class baseformatter(object):
146 146 def __init__(self, ui, topic, opts, converter):
147 147 self._ui = ui
148 148 self._topic = topic
149 149 self._style = opts.get("style")
150 150 self._template = opts.get("template")
151 151 self._converter = converter
152 152 self._item = None
153 153 # function to convert node to string suitable for this output
154 154 self.hexfunc = hex
155 155 def __enter__(self):
156 156 return self
157 157 def __exit__(self, exctype, excvalue, traceback):
158 158 if exctype is None:
159 159 self.end()
160 160 def _showitem(self):
161 161 '''show a formatted item once all data is collected'''
162 162 pass
163 163 def startitem(self):
164 164 '''begin an item in the format list'''
165 165 if self._item is not None:
166 166 self._showitem()
167 167 self._item = {}
168 168 def formatdate(self, date, fmt='%a %b %d %H:%M:%S %Y %1%2'):
169 169 '''convert date tuple to appropriate format'''
170 170 return self._converter.formatdate(date, fmt)
171 171 def formatdict(self, data, key='key', value='value', fmt='%s=%s', sep=' '):
172 172 '''convert dict or key-value pairs to appropriate dict format'''
173 173 return self._converter.formatdict(data, key, value, fmt, sep)
174 174 def formatlist(self, data, name, fmt='%s', sep=' '):
175 175 '''convert iterable to appropriate list format'''
176 176 # name is a mandatory argument for now, but it could be optional if
177 177 # we have default template keyword, e.g. {item}
178 178 return self._converter.formatlist(data, name, fmt, sep)
179 179 def context(self, **ctxs):
180 180 '''insert context objects to be used to render template keywords'''
181 181 pass
182 182 def data(self, **data):
183 183 '''insert data into item that's not shown in default output'''
184 184 data = pycompat.byteskwargs(data)
185 185 self._item.update(data)
186 186 def write(self, fields, deftext, *fielddata, **opts):
187 187 '''do default text output while assigning data to item'''
188 188 fieldkeys = fields.split()
189 189 assert len(fieldkeys) == len(fielddata)
190 190 self._item.update(zip(fieldkeys, fielddata))
191 191 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
192 192 '''do conditional write (primarily for plain formatter)'''
193 193 fieldkeys = fields.split()
194 194 assert len(fieldkeys) == len(fielddata)
195 195 self._item.update(zip(fieldkeys, fielddata))
196 196 def plain(self, text, **opts):
197 197 '''show raw text for non-templated mode'''
198 198 pass
199 199 def isplain(self):
200 200 '''check for plain formatter usage'''
201 201 return False
202 202 def nested(self, field):
203 203 '''sub formatter to store nested data in the specified field'''
204 204 self._item[field] = data = []
205 205 return _nestedformatter(self._ui, self._converter, data)
206 206 def end(self):
207 207 '''end output for the formatter'''
208 208 if self._item is not None:
209 209 self._showitem()
210 210
211 211 def nullformatter(ui, topic):
212 212 '''formatter that prints nothing'''
213 213 return baseformatter(ui, topic, opts={}, converter=_nullconverter)
214 214
215 215 class _nestedformatter(baseformatter):
216 216 '''build sub items and store them in the parent formatter'''
217 217 def __init__(self, ui, converter, data):
218 218 baseformatter.__init__(self, ui, topic='', opts={}, converter=converter)
219 219 self._data = data
220 220 def _showitem(self):
221 221 self._data.append(self._item)
222 222
223 223 def _iteritems(data):
224 224 '''iterate key-value pairs in stable order'''
225 225 if isinstance(data, dict):
226 226 return sorted(data.iteritems())
227 227 return data
228 228
229 229 class _plainconverter(object):
230 230 '''convert non-primitive data types to text'''
231 231 @staticmethod
232 232 def formatdate(date, fmt):
233 233 '''stringify date tuple in the given format'''
234 234 return util.datestr(date, fmt)
235 235 @staticmethod
236 236 def formatdict(data, key, value, fmt, sep):
237 237 '''stringify key-value pairs separated by sep'''
238 238 return sep.join(fmt % (k, v) for k, v in _iteritems(data))
239 239 @staticmethod
240 240 def formatlist(data, name, fmt, sep):
241 241 '''stringify iterable separated by sep'''
242 242 return sep.join(fmt % e for e in data)
243 243
244 244 class plainformatter(baseformatter):
245 245 '''the default text output scheme'''
246 246 def __init__(self, ui, out, topic, opts):
247 247 baseformatter.__init__(self, ui, topic, opts, _plainconverter)
248 248 if ui.debugflag:
249 249 self.hexfunc = hex
250 250 else:
251 251 self.hexfunc = short
252 252 if ui is out:
253 253 self._write = ui.write
254 254 else:
255 255 self._write = lambda s, **opts: out.write(s)
256 256 def startitem(self):
257 257 pass
258 258 def data(self, **data):
259 259 pass
260 260 def write(self, fields, deftext, *fielddata, **opts):
261 261 self._write(deftext % fielddata, **opts)
262 262 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
263 263 '''do conditional write'''
264 264 if cond:
265 265 self._write(deftext % fielddata, **opts)
266 266 def plain(self, text, **opts):
267 267 self._write(text, **opts)
268 268 def isplain(self):
269 269 return True
270 270 def nested(self, field):
271 271 # nested data will be directly written to ui
272 272 return self
273 273 def end(self):
274 274 pass
275 275
276 276 class debugformatter(baseformatter):
277 277 def __init__(self, ui, out, topic, opts):
278 278 baseformatter.__init__(self, ui, topic, opts, _nullconverter)
279 279 self._out = out
280 280 self._out.write("%s = [\n" % self._topic)
281 281 def _showitem(self):
282 282 self._out.write(" " + repr(self._item) + ",\n")
283 283 def end(self):
284 284 baseformatter.end(self)
285 285 self._out.write("]\n")
286 286
287 287 class pickleformatter(baseformatter):
288 288 def __init__(self, ui, out, topic, opts):
289 289 baseformatter.__init__(self, ui, topic, opts, _nullconverter)
290 290 self._out = out
291 291 self._data = []
292 292 def _showitem(self):
293 293 self._data.append(self._item)
294 294 def end(self):
295 295 baseformatter.end(self)
296 296 self._out.write(pickle.dumps(self._data))
297 297
298 298 class jsonformatter(baseformatter):
299 299 def __init__(self, ui, out, topic, opts):
300 300 baseformatter.__init__(self, ui, topic, opts, _nullconverter)
301 301 self._out = out
302 302 self._out.write("[")
303 303 self._first = True
304 304 def _showitem(self):
305 305 if self._first:
306 306 self._first = False
307 307 else:
308 308 self._out.write(",")
309 309
310 310 self._out.write("\n {\n")
311 311 first = True
312 312 for k, v in sorted(self._item.items()):
313 313 if first:
314 314 first = False
315 315 else:
316 316 self._out.write(",\n")
317 317 u = templatefilters.json(v, paranoid=False)
318 318 self._out.write(' "%s": %s' % (k, u))
319 319 self._out.write("\n }")
320 320 def end(self):
321 321 baseformatter.end(self)
322 322 self._out.write("\n]\n")
323 323
324 324 class _templateconverter(object):
325 325 '''convert non-primitive data types to be processed by templater'''
326 326 @staticmethod
327 327 def formatdate(date, fmt):
328 328 '''return date tuple'''
329 329 return date
330 330 @staticmethod
331 331 def formatdict(data, key, value, fmt, sep):
332 332 '''build object that can be evaluated as either plain string or dict'''
333 333 data = util.sortdict(_iteritems(data))
334 334 def f():
335 335 yield _plainconverter.formatdict(data, key, value, fmt, sep)
336 336 return templatekw.hybriddict(data, key=key, value=value, fmt=fmt,
337 337 gen=f())
338 338 @staticmethod
339 339 def formatlist(data, name, fmt, sep):
340 340 '''build object that can be evaluated as either plain string or list'''
341 341 data = list(data)
342 342 def f():
343 343 yield _plainconverter.formatlist(data, name, fmt, sep)
344 344 return templatekw.hybridlist(data, name=name, fmt=fmt, gen=f())
345 345
346 346 class templateformatter(baseformatter):
347 347 def __init__(self, ui, out, topic, opts):
348 348 baseformatter.__init__(self, ui, topic, opts, _templateconverter)
349 349 self._out = out
350 350 spec = lookuptemplate(ui, topic, opts.get('template', ''))
351 351 self._tref = spec.ref
352 352 self._t = loadtemplater(ui, spec, cache=templatekw.defaulttempl)
353 353 self._counter = itertools.count()
354 354 self._cache = {} # for templatekw/funcs to store reusable data
355 355 def context(self, **ctxs):
356 356 '''insert context objects to be used to render template keywords'''
357 357 assert all(k == 'ctx' for k in ctxs)
358 358 self._item.update(ctxs)
359 359 def _showitem(self):
360 360 # TODO: add support for filectx. probably each template keyword or
361 361 # function will have to declare dependent resources. e.g.
362 362 # @templatekeyword(..., requires=('ctx',))
363 363 props = {}
364 364 if 'ctx' in self._item:
365 365 props.update(templatekw.keywords)
366 366 props['index'] = next(self._counter)
367 367 # explicitly-defined fields precede templatekw
368 368 props.update(self._item)
369 369 if 'ctx' in self._item:
370 370 # but template resources must be always available
371 371 props['templ'] = self._t
372 372 props['repo'] = props['ctx'].repo()
373 373 props['revcache'] = {}
374 props = pycompat.strkwargs(props)
374 375 g = self._t(self._tref, ui=self._ui, cache=self._cache, **props)
375 376 self._out.write(templater.stringify(g))
376 377
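This is the second hunk of the change: props mixes bytes keys collected from self._item, and expanding them with **props fails on Python 3 unless the keys are converted first. A standalone illustration:

    def render(**props):
        return props

    render(**{'rev': 0})      # fine: str key
    # render(**{b'rev': 0})   # Python 3: TypeError: keywords must be strings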
377 378 templatespec = collections.namedtuple(r'templatespec',
378 379 r'ref tmpl mapfile')
379 380
380 381 def lookuptemplate(ui, topic, tmpl):
381 382 """Find the template matching the given -T/--template spec 'tmpl'
382 383
383 384 'tmpl' can be any of the following:
384 385
385 386 - a literal template (e.g. '{rev}')
386 387 - a map-file name or path (e.g. 'changelog')
387 388 - a reference to [templates] in config file
388 389 - a path to raw template file
389 390
390 391 A map file defines a stand-alone template environment. If a map file is
391 392 selected, all templates defined in the file will be loaded, and the
392 393 template matching the given topic will be rendered. No aliases will be
393 394 loaded from user config.
394 395
395 396 If no map file is selected, all templates in the [templates] section will
396 397 be available, as well as aliases in [templatealias].
397 398 """
398 399
399 400 # looks like a literal template?
400 401 if '{' in tmpl:
401 402 return templatespec('', tmpl, None)
402 403
403 404 # perhaps a stock style?
404 405 if not os.path.split(tmpl)[0]:
405 406 mapname = (templater.templatepath('map-cmdline.' + tmpl)
406 407 or templater.templatepath(tmpl))
407 408 if mapname and os.path.isfile(mapname):
408 409 return templatespec(topic, None, mapname)
409 410
410 411 # perhaps it's a reference to [templates]
411 412 if ui.config('templates', tmpl):
412 413 return templatespec(tmpl, None, None)
413 414
414 415 if tmpl == 'list':
415 416 ui.write(_("available styles: %s\n") % templater.stylelist())
416 417 raise error.Abort(_("specify a template"))
417 418
418 419 # perhaps it's a path to a map or a template
419 420 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
420 421 # is it a mapfile for a style?
421 422 if os.path.basename(tmpl).startswith("map-"):
422 423 return templatespec(topic, None, os.path.realpath(tmpl))
423 424 with util.posixfile(tmpl, 'rb') as f:
424 425 tmpl = f.read()
425 426 return templatespec('', tmpl, None)
426 427
427 428 # constant string?
428 429 return templatespec('', tmpl, None)
429 430
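Typical resolutions, following the branches above (paths and config contents are assumptions, shown as comments only):

    # literal template -> inline spec
    # lookuptemplate(ui, 'log', '{rev}\n')
    #     => templatespec('', '{rev}\n', None)
    # stock style shipped with Mercurial -> map file keyed by topic
    # lookuptemplate(ui, 'log', 'changelog')
    #     => templatespec('log', None, '.../map-cmdline.changelog')
    # name defined in [templates] -> reference spec
    # lookuptemplate(ui, 'log', 'mytmpl')
    #     => templatespec('mytmpl', None, None)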
430 431 def loadtemplater(ui, spec, cache=None):
431 432 """Create a templater from either a literal template or loading from
432 433 a map file"""
433 434 assert not (spec.tmpl and spec.mapfile)
434 435 if spec.mapfile:
435 436 return templater.templater.frommapfile(spec.mapfile, cache=cache)
436 437 return maketemplater(ui, spec.tmpl, cache=cache)
437 438
438 439 def maketemplater(ui, tmpl, cache=None):
439 440 """Create a templater from a string template 'tmpl'"""
440 441 aliases = ui.configitems('templatealias')
441 442 t = templater.templater(cache=cache, aliases=aliases)
442 443 t.cache.update((k, templater.unquotestring(v))
443 444 for k, v in ui.configitems('templates'))
444 445 if tmpl:
445 446 t.cache[''] = tmpl
446 447 return t
447 448
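A minimal usage sketch (assuming a ui instance, and that the templater is invoked the way templateformatter._showitem does above):

    t = maketemplater(ui, '{rev}:{node|short}\n')
    g = t('', ui=ui, rev=0, node='0' * 40)  # '' is the ref used for literals
    print(templater.stringify(g))           # -> 0:000000000000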
448 449 def formatter(ui, out, topic, opts):
449 450 template = opts.get("template", "")
450 451 if template == "json":
451 452 return jsonformatter(ui, out, topic, opts)
452 453 elif template == "pickle":
453 454 return pickleformatter(ui, out, topic, opts)
454 455 elif template == "debug":
455 456 return debugformatter(ui, out, topic, opts)
456 457 elif template != "":
457 458 return templateformatter(ui, out, topic, opts)
458 459 # developer config: ui.formatdebug
459 460 elif ui.configbool('ui', 'formatdebug'):
460 461 return debugformatter(ui, out, topic, opts)
461 462 # deprecated config: ui.formatjson
462 463 elif ui.configbool('ui', 'formatjson'):
463 464 return jsonformatter(ui, out, topic, opts)
464 465 return plainformatter(ui, out, topic, opts)
465 466
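In short, the -T value dispatches as follows (mirroring the branches above):

    # 'json'    -> jsonformatter      (machine-readable list of objects)
    # 'pickle'  -> pickleformatter    (Python pickle stream)
    # 'debug'   -> debugformatter     (repr() of each item)
    # any other non-empty string -> templateformatter (template or style)
    # ''        -> plainformatter     (default human-readable output)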
466 467 @contextlib.contextmanager
467 468 def openformatter(ui, filename, topic, opts):
468 469 """Create a formatter that writes outputs to the specified file
469 470
470 471 Must be invoked using the 'with' statement.
471 472 """
472 473 with util.posixfile(filename, 'wb') as out:
473 474 with formatter(ui, out, topic, opts) as fm:
474 475 yield fm
475 476
476 477 @contextlib.contextmanager
477 478 def _neverending(fm):
478 479 yield fm
479 480
480 481 def maybereopen(fm, filename, opts):
481 482 """Create a formatter backed by file if filename specified, else return
482 483 the given formatter
483 484
484 485 Must be invoked using the 'with' statement. This will never call fm.end()
485 486 on the given formatter.
486 487 """
487 488 if filename:
488 489 return openformatter(fm._ui, filename, fm._topic, opts)
489 490 else:
490 491 return _neverending(fm)
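A usage sketch (the 'output' option name is invented):

    with maybereopen(fm, opts.get('output'), opts) as innerfm:
        innerfm.startitem()
        innerfm.write('path', '%s\n', 'foo')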