pushrace: avoid crash on bare push when using concurrent push mode...
marmoute
r33133:78fc540c default
@@ -1,2012 +1,2013 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from . import (
19 19 bookmarks as bookmod,
20 20 bundle2,
21 21 changegroup,
22 22 discovery,
23 23 error,
24 24 lock as lockmod,
25 25 obsolete,
26 26 phases,
27 27 pushkey,
28 28 pycompat,
29 29 scmutil,
30 30 sslutil,
31 31 streamclone,
32 32 url as urlmod,
33 33 util,
34 34 )
35 35
36 36 urlerr = util.urlerr
37 37 urlreq = util.urlreq
38 38
39 39 # Maps bundle version human names to changegroup versions.
40 40 _bundlespeccgversions = {'v1': '01',
41 41 'v2': '02',
42 42 'packed1': 's1',
43 43 'bundle2': '02', #legacy
44 44 }
45 45
46 46 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
47 47 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
48 48
49 49 def parsebundlespec(repo, spec, strict=True, externalnames=False):
50 50 """Parse a bundle string specification into parts.
51 51
52 52 Bundle specifications denote a well-defined bundle/exchange format.
53 53 The content of a given specification should not change over time in
54 54 order to ensure that bundles produced by a newer version of Mercurial are
55 55 readable from an older version.
56 56
57 57 The string currently has the form:
58 58
59 59 <compression>-<type>[;<parameter0>[;<parameter1>]]
60 60
61 61 Where <compression> is one of the supported compression formats
62 62 and <type> is (currently) a version string. A ";" can follow the type and
63 63 all text afterwards is interpreted as URI encoded, ";" delimited key=value
64 64 pairs.
65 65
66 66 If ``strict`` is True (the default) <compression> is required. Otherwise,
67 67 it is optional.
68 68
69 69 If ``externalnames`` is False (the default), the human-centric names will
70 70 be converted to their internal representation.
71 71
72 72 Returns a 3-tuple of (compression, version, parameters). Compression will
73 73 be ``None`` if not in strict mode and a compression isn't defined.
74 74
75 75 An ``InvalidBundleSpecification`` is raised when the specification is
76 76 not syntactically well formed.
77 77
78 78 An ``UnsupportedBundleSpecification`` is raised when the compression or
79 79 bundle type/version is not recognized.
80 80
81 81 Note: this function will likely eventually return a more complex data
82 82 structure, including bundle2 part information.
83 83 """
84 84 def parseparams(s):
85 85 if ';' not in s:
86 86 return s, {}
87 87
88 88 params = {}
89 89 version, paramstr = s.split(';', 1)
90 90
91 91 for p in paramstr.split(';'):
92 92 if '=' not in p:
93 93 raise error.InvalidBundleSpecification(
94 94 _('invalid bundle specification: '
95 95 'missing "=" in parameter: %s') % p)
96 96
97 97 key, value = p.split('=', 1)
98 98 key = urlreq.unquote(key)
99 99 value = urlreq.unquote(value)
100 100 params[key] = value
101 101
102 102 return version, params
103 103
104 104
105 105 if strict and '-' not in spec:
106 106 raise error.InvalidBundleSpecification(
107 107 _('invalid bundle specification; '
108 108 'must be prefixed with compression: %s') % spec)
109 109
110 110 if '-' in spec:
111 111 compression, version = spec.split('-', 1)
112 112
113 113 if compression not in util.compengines.supportedbundlenames:
114 114 raise error.UnsupportedBundleSpecification(
115 115 _('%s compression is not supported') % compression)
116 116
117 117 version, params = parseparams(version)
118 118
119 119 if version not in _bundlespeccgversions:
120 120 raise error.UnsupportedBundleSpecification(
121 121 _('%s is not a recognized bundle version') % version)
122 122 else:
123 123 # Value could be just the compression or just the version, in which
124 124 # case some defaults are assumed (but only when not in strict mode).
125 125 assert not strict
126 126
127 127 spec, params = parseparams(spec)
128 128
129 129 if spec in util.compengines.supportedbundlenames:
130 130 compression = spec
131 131 version = 'v1'
132 132 # Generaldelta repos require v2.
133 133 if 'generaldelta' in repo.requirements:
134 134 version = 'v2'
135 135 # Modern compression engines require v2.
136 136 if compression not in _bundlespecv1compengines:
137 137 version = 'v2'
138 138 elif spec in _bundlespeccgversions:
139 139 if spec == 'packed1':
140 140 compression = 'none'
141 141 else:
142 142 compression = 'bzip2'
143 143 version = spec
144 144 else:
145 145 raise error.UnsupportedBundleSpecification(
146 146 _('%s is not a recognized bundle specification') % spec)
147 147
148 148 # Bundle version 1 only supports a known set of compression engines.
149 149 if version == 'v1' and compression not in _bundlespecv1compengines:
150 150 raise error.UnsupportedBundleSpecification(
151 151 _('compression engine %s is not supported on v1 bundles') %
152 152 compression)
153 153
154 154 # The specification for packed1 can optionally declare the data formats
155 155 # required to apply it. If we see this metadata, compare against what the
156 156 # repo supports and error if the bundle isn't compatible.
157 157 if version == 'packed1' and 'requirements' in params:
158 158 requirements = set(params['requirements'].split(','))
159 159 missingreqs = requirements - repo.supportedformats
160 160 if missingreqs:
161 161 raise error.UnsupportedBundleSpecification(
162 162 _('missing support for repository features: %s') %
163 163 ', '.join(sorted(missingreqs)))
164 164
165 165 if not externalnames:
166 166 engine = util.compengines.forbundlename(compression)
167 167 compression = engine.bundletype()[1]
168 168 version = _bundlespeccgversions[version]
169 169 return compression, version, params
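# Editor's note: a couple of illustrative inputs/outputs for the function
# above (a sketch assuming the stock 'gzip' and 'none' compression engines,
# whose internal bundle types are 'GZ' and 'UN'):
#
#   >>> parsebundlespec(repo, 'gzip-v2')
#   ('GZ', '02', {})
#   >>> parsebundlespec(repo, 'none-packed1;requirements=revlogv1')
#   ('UN', 's1', {'requirements': 'revlogv1'})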
170 170
171 171 def readbundle(ui, fh, fname, vfs=None):
172 172 header = changegroup.readexactly(fh, 4)
173 173
174 174 alg = None
175 175 if not fname:
176 176 fname = "stream"
177 177 if not header.startswith('HG') and header.startswith('\0'):
178 178 fh = changegroup.headerlessfixup(fh, header)
179 179 header = "HG10"
180 180 alg = 'UN'
181 181 elif vfs:
182 182 fname = vfs.join(fname)
183 183
184 184 magic, version = header[0:2], header[2:4]
185 185
186 186 if magic != 'HG':
187 187 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
188 188 if version == '10':
189 189 if alg is None:
190 190 alg = changegroup.readexactly(fh, 2)
191 191 return changegroup.cg1unpacker(fh, alg)
192 192 elif version.startswith('2'):
193 193 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
194 194 elif version == 'S1':
195 195 return streamclone.streamcloneapplier(fh)
196 196 else:
197 197 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
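# Editor's note: a summary of the 4-byte headers handled above (not an
# exhaustive spec; the 2-byte compression code depends on available engines):
#
#   'HG10' + compression code ('UN', 'GZ', 'BZ') -> cg1unpacker
#   'HG2X' (any version starting with '2')       -> bundle2 unbundler
#   'HGS1'                                       -> streamclone applier
#   headerless data starting with '\0'           -> fixed up as 'HG10' + 'UN'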
198 198
199 199 def getbundlespec(ui, fh):
200 200 """Infer the bundlespec from a bundle file handle.
201 201
202 202 The input file handle is seeked and the original seek position is not
203 203 restored.
204 204 """
205 205 def speccompression(alg):
206 206 try:
207 207 return util.compengines.forbundletype(alg).bundletype()[0]
208 208 except KeyError:
209 209 return None
210 210
211 211 b = readbundle(ui, fh, None)
212 212 if isinstance(b, changegroup.cg1unpacker):
213 213 alg = b._type
214 214 if alg == '_truncatedBZ':
215 215 alg = 'BZ'
216 216 comp = speccompression(alg)
217 217 if not comp:
218 218 raise error.Abort(_('unknown compression algorithm: %s') % alg)
219 219 return '%s-v1' % comp
220 220 elif isinstance(b, bundle2.unbundle20):
221 221 if 'Compression' in b.params:
222 222 comp = speccompression(b.params['Compression'])
223 223 if not comp:
224 224 raise error.Abort(_('unknown compression algorithm: %s') % comp)
225 225 else:
226 226 comp = 'none'
227 227
228 228 version = None
229 229 for part in b.iterparts():
230 230 if part.type == 'changegroup':
231 231 version = part.params['version']
232 232 if version in ('01', '02'):
233 233 version = 'v2'
234 234 else:
235 235 raise error.Abort(_('changegroup version %s does not have '
236 236 'a known bundlespec') % version,
237 237 hint=_('try upgrading your Mercurial '
238 238 'client'))
239 239
240 240 if not version:
241 241 raise error.Abort(_('could not identify changegroup version in '
242 242 'bundle'))
243 243
244 244 return '%s-%s' % (comp, version)
245 245 elif isinstance(b, streamclone.streamcloneapplier):
246 246 requirements = streamclone.readbundle1header(fh)[2]
247 247 params = 'requirements=%s' % ','.join(sorted(requirements))
248 248 return 'none-packed1;%s' % urlreq.quote(params)
249 249 else:
250 250 raise error.Abort(_('unknown bundle type: %s') % b)
251 251
252 252 def _computeoutgoing(repo, heads, common):
253 253 """Computes which revs are outgoing given a set of common
254 254 and a set of heads.
255 255
256 256 This is a separate function so extensions can have access to
257 257 the logic.
258 258
259 259 Returns a discovery.outgoing object.
260 260 """
261 261 cl = repo.changelog
262 262 if common:
263 263 hasnode = cl.hasnode
264 264 common = [n for n in common if hasnode(n)]
265 265 else:
266 266 common = [nullid]
267 267 if not heads:
268 268 heads = cl.heads()
269 269 return discovery.outgoing(repo, common, heads)
270 270
271 271 def _forcebundle1(op):
272 272 """return true if a pull/push must use bundle1
273 273
274 274 This function is used to allow testing of the older bundle version"""
275 275 ui = op.repo.ui
276 276 forcebundle1 = False
277 277 # The goal of this config is to allow developers to choose the bundle
278 278 # version used during exchange. This is especially handy during tests.
279 279 # Value is a list of bundle versions to pick from; the highest version
280 280 # should be used.
281 281 #
282 282 # developer config: devel.legacy.exchange
283 283 exchange = ui.configlist('devel', 'legacy.exchange')
284 284 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
285 285 return forcebundle1 or not op.remote.capable('bundle2')
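# Editor's note: a minimal sketch of the knob read above, as it might appear
# in an hgrc used by the test suite (hypothetical snippet):
#
#   [devel]
#   legacy.exchange = bundle1
#
# Listing 'bundle1' without 'bundle2' forces the bundle1 protocol even when
# the peer advertises the bundle2 capability.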
286 286
287 287 class pushoperation(object):
288 288 """A object that represent a single push operation
289 289
290 290 Its purpose is to carry push related state and very common operations.
291 291
292 292 A new pushoperation should be created at the beginning of each push and
293 293 discarded afterward.
294 294 """
295 295
296 296 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
297 297 bookmarks=()):
298 298 # repo we push from
299 299 self.repo = repo
300 300 self.ui = repo.ui
301 301 # repo we push to
302 302 self.remote = remote
303 303 # force option provided
304 304 self.force = force
305 305 # revs to be pushed (None is "all")
306 306 self.revs = revs
307 307 # bookmarks explicitly pushed
308 308 self.bookmarks = bookmarks
309 309 # allow push of new branch
310 310 self.newbranch = newbranch
311 311 # did a local lock get acquired?
312 312 self.locallocked = None
313 313 # steps already performed
314 314 # (used to check what steps have already been performed through bundle2)
315 315 self.stepsdone = set()
316 316 # Integer version of the changegroup push result
317 317 # - None means nothing to push
318 318 # - 0 means HTTP error
319 319 # - 1 means we pushed and remote head count is unchanged *or*
320 320 # we have outgoing changesets but refused to push
321 321 # - other values as described by addchangegroup()
322 322 self.cgresult = None
323 323 # Boolean value for the bookmark push
324 324 self.bkresult = None
325 325 # discovery.outgoing object (contains common and outgoing data)
326 326 self.outgoing = None
327 327 # all remote topological heads before the push
328 328 self.remoteheads = None
329 329 # Details of the remote branch pre and post push
330 330 #
331 331 # mapping: {'branch': ([remoteheads],
332 332 # [newheads],
333 333 # [unsyncedheads],
334 334 # [discardedheads])}
335 335 # - branch: the branch name
336 336 # - remoteheads: the list of remote heads known locally
337 337 # None if the branch is new
338 338 # - newheads: the new remote heads (known locally) with outgoing pushed
339 339 # - unsyncedheads: the list of remote heads unknown locally.
340 340 # - discardedheads: the list of remote heads made obsolete by the push
341 341 self.pushbranchmap = None
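# Editor's note: an illustrative (hypothetical) entry, following the
# shape documented above:
#
#   {'default': ([oldhead1, oldhead2],  # remote heads known locally
#                [oldhead1, newhead],   # new remote heads after the push
#                [],                    # remote heads unknown locally
#                [oldhead2])}           # heads made obsolete by the push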
342 342 # testable as a boolean indicating if any nodes are missing locally.
343 343 self.incoming = None
344 344 # phases changes that must be pushed alongside the changesets
345 345 self.outdatedphases = None
346 346 # phases changes that must be pushed if changeset push fails
347 347 self.fallbackoutdatedphases = None
348 348 # outgoing obsmarkers
349 349 self.outobsmarkers = set()
350 350 # outgoing bookmarks
351 351 self.outbookmarks = []
352 352 # transaction manager
353 353 self.trmanager = None
354 354 # map { pushkey partid -> callback handling failure}
355 355 # used to handle exception from mandatory pushkey part failure
356 356 self.pkfailcb = {}
357 357
358 358 @util.propertycache
359 359 def futureheads(self):
360 360 """future remote heads if the changeset push succeeds"""
361 361 return self.outgoing.missingheads
362 362
363 363 @util.propertycache
364 364 def fallbackheads(self):
365 365 """future remote heads if the changeset push fails"""
366 366 if self.revs is None:
367 367 # no target to push, all common heads are relevant
368 368 return self.outgoing.commonheads
369 369 unfi = self.repo.unfiltered()
370 370 # I want cheads = heads(::missingheads and ::commonheads)
371 371 # (missingheads is revs with secret changeset filtered out)
372 372 #
373 373 # This can be expressed as:
374 374 # cheads = ( (missingheads and ::commonheads)
375 375 # + (commonheads and ::missingheads)
376 376 # )
377 377 #
378 378 # while trying to push we already computed the following:
379 379 # common = (::commonheads)
380 380 # missing = ((commonheads::missingheads) - commonheads)
381 381 #
382 382 # We can pick:
383 383 # * missingheads part of common (::commonheads)
384 384 common = self.outgoing.common
385 385 nm = self.repo.changelog.nodemap
386 386 cheads = [node for node in self.revs if nm[node] in common]
387 387 # and
388 388 # * commonheads parents on missing
389 389 revset = unfi.set('%ln and parents(roots(%ln))',
390 390 self.outgoing.commonheads,
391 391 self.outgoing.missing)
392 392 cheads.extend(c.node() for c in revset)
393 393 return cheads
394 394
395 395 @property
396 396 def commonheads(self):
397 397 """set of all common heads after changeset bundle push"""
398 398 if self.cgresult:
399 399 return self.futureheads
400 400 else:
401 401 return self.fallbackheads
402 402
403 403 # mapping of message used when pushing bookmark
404 404 bookmsgmap = {'update': (_("updating bookmark %s\n"),
405 405 _('updating bookmark %s failed!\n')),
406 406 'export': (_("exporting bookmark %s\n"),
407 407 _('exporting bookmark %s failed!\n')),
408 408 'delete': (_("deleting remote bookmark %s\n"),
409 409 _('deleting remote bookmark %s failed!\n')),
410 410 }
411 411
412 412
413 413 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
414 414 opargs=None):
415 415 '''Push outgoing changesets (limited by revs) from a local
416 416 repository to remote. Return an integer:
417 417 - None means nothing to push
418 418 - 0 means HTTP error
419 419 - 1 means we pushed and remote head count is unchanged *or*
420 420 we have outgoing changesets but refused to push
421 421 - other values as described by addchangegroup()
422 422 '''
423 423 if opargs is None:
424 424 opargs = {}
425 425 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
426 426 **opargs)
427 427 if pushop.remote.local():
428 428 missing = (set(pushop.repo.requirements)
429 429 - pushop.remote.local().supported)
430 430 if missing:
431 431 msg = _("required features are not"
432 432 " supported in the destination:"
433 433 " %s") % (', '.join(sorted(missing)))
434 434 raise error.Abort(msg)
435 435
436 436 # there are two ways to push to remote repo:
437 437 #
438 438 # addchangegroup assumes local user can lock remote
439 439 # repo (local filesystem, old ssh servers).
440 440 #
441 441 # unbundle assumes local user cannot lock remote repo (new ssh
442 442 # servers, http servers).
443 443
444 444 if not pushop.remote.canpush():
445 445 raise error.Abort(_("destination does not support push"))
446 446 # get local lock as we might write phase data
447 447 localwlock = locallock = None
448 448 try:
449 449 # bundle2 push may receive a reply bundle touching bookmarks or other
450 450 # things requiring the wlock. Take it now to ensure proper ordering.
451 451 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
452 452 if (not _forcebundle1(pushop)) and maypushback:
453 453 localwlock = pushop.repo.wlock()
454 454 locallock = pushop.repo.lock()
455 455 pushop.locallocked = True
456 456 except IOError as err:
457 457 pushop.locallocked = False
458 458 if err.errno != errno.EACCES:
459 459 raise
460 460 # source repo cannot be locked.
461 461 # We do not abort the push, but just disable the local phase
462 462 # synchronisation.
463 463 msg = 'cannot lock source repository: %s\n' % err
464 464 pushop.ui.debug(msg)
465 465 try:
466 466 if pushop.locallocked:
467 467 pushop.trmanager = transactionmanager(pushop.repo,
468 468 'push-response',
469 469 pushop.remote.url())
470 470 pushop.repo.checkpush(pushop)
471 471 lock = None
472 472 unbundle = pushop.remote.capable('unbundle')
473 473 if not unbundle:
474 474 lock = pushop.remote.lock()
475 475 try:
476 476 _pushdiscovery(pushop)
477 477 if not _forcebundle1(pushop):
478 478 _pushbundle2(pushop)
479 479 _pushchangeset(pushop)
480 480 _pushsyncphase(pushop)
481 481 _pushobsolete(pushop)
482 482 _pushbookmark(pushop)
483 483 finally:
484 484 if lock is not None:
485 485 lock.release()
486 486 if pushop.trmanager:
487 487 pushop.trmanager.close()
488 488 finally:
489 489 if pushop.trmanager:
490 490 pushop.trmanager.release()
491 491 if locallock is not None:
492 492 locallock.release()
493 493 if localwlock is not None:
494 494 localwlock.release()
495 495
496 496 return pushop
497 497
498 498 # list of steps to perform discovery before push
499 499 pushdiscoveryorder = []
500 500
501 501 # Mapping between step name and function
502 502 #
503 503 # This exists to help extensions wrap steps if necessary
504 504 pushdiscoverymapping = {}
505 505
506 506 def pushdiscovery(stepname):
507 507 """decorator for function performing discovery before push
508 508
509 509 The function is added to the step -> function mapping and appended to the
510 510 list of steps. Beware that decorated functions will be added in order (this
511 511 may matter).
512 512
513 513 You can only use this decorator for a new step; if you want to wrap a step
514 514 from an extension, change the pushdiscoverymapping dictionary directly."""
515 515 def dec(func):
516 516 assert stepname not in pushdiscoverymapping
517 517 pushdiscoverymapping[stepname] = func
518 518 pushdiscoveryorder.append(stepname)
519 519 return func
520 520 return dec
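# Editor's note: a minimal sketch of registering an extra discovery step
# with the decorator above (the step name and body are hypothetical):
#
#   @pushdiscovery('mystep')
#   def _pushdiscoverymystep(pushop):
#       pushop.ui.debug('running my extra discovery step\n')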
521 521
522 522 def _pushdiscovery(pushop):
523 523 """Run all discovery steps"""
524 524 for stepname in pushdiscoveryorder:
525 525 step = pushdiscoverymapping[stepname]
526 526 step(pushop)
527 527
528 528 @pushdiscovery('changeset')
529 529 def _pushdiscoverychangeset(pushop):
530 530 """discover the changeset that need to be pushed"""
531 531 fci = discovery.findcommonincoming
532 532 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
533 533 common, inc, remoteheads = commoninc
534 534 fco = discovery.findcommonoutgoing
535 535 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
536 536 commoninc=commoninc, force=pushop.force)
537 537 pushop.outgoing = outgoing
538 538 pushop.remoteheads = remoteheads
539 539 pushop.incoming = inc
540 540
541 541 @pushdiscovery('phase')
542 542 def _pushdiscoveryphase(pushop):
543 543 """discover the phase that needs to be pushed
544 544
545 545 (computed for both success and failure case for changesets push)"""
546 546 outgoing = pushop.outgoing
547 547 unfi = pushop.repo.unfiltered()
548 548 remotephases = pushop.remote.listkeys('phases')
549 549 publishing = remotephases.get('publishing', False)
550 550 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
551 551 and remotephases # server supports phases
552 552 and not pushop.outgoing.missing # no changesets to be pushed
553 553 and publishing):
554 554 # When:
555 555 # - this is a subrepo push
556 556 # - and the remote supports phases
557 557 # - and no changesets are to be pushed
558 558 # - and the remote is publishing
559 559 # We may be in the issue 3871 case!
560 560 # We drop the phase synchronisation that would otherwise
561 561 # be done as a courtesy; it could publish on the remote
562 562 # changesets that are possibly still draft locally.
563 563 remotephases = {'publishing': 'True'}
564 564 ana = phases.analyzeremotephases(pushop.repo,
565 565 pushop.fallbackheads,
566 566 remotephases)
567 567 pheads, droots = ana
568 568 extracond = ''
569 569 if not publishing:
570 570 extracond = ' and public()'
571 571 revset = 'heads((%%ln::%%ln) %s)' % extracond
572 572 # Get the list of all revs draft on remote but public here.
573 573 # XXX Beware that the revset breaks if droots is not strictly
574 574 # XXX roots; we may want to ensure it is, but that is costly
575 575 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
576 576 if not outgoing.missing:
577 577 future = fallback
578 578 else:
579 579 # add the changesets we are going to push as draft
580 580 #
581 581 # This should not be necessary for a publishing server, but
582 582 # because of an issue fixed in xxxxx we have to do it anyway.
583 583 fdroots = list(unfi.set('roots(%ln + %ln::)',
584 584 outgoing.missing, droots))
585 585 fdroots = [f.node() for f in fdroots]
586 586 future = list(unfi.set(revset, fdroots, pushop.futureheads))
587 587 pushop.outdatedphases = future
588 588 pushop.fallbackoutdatedphases = fallback
589 589
590 590 @pushdiscovery('obsmarker')
591 591 def _pushdiscoveryobsmarkers(pushop):
592 592 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
593 593 and pushop.repo.obsstore
594 594 and 'obsolete' in pushop.remote.listkeys('namespaces')):
595 595 repo = pushop.repo
596 596 # very naive computation that can be quite expensive on big repos.
597 597 # However, evolution is currently slow on them anyway.
598 598 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
599 599 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
600 600
601 601 @pushdiscovery('bookmarks')
602 602 def _pushdiscoverybookmarks(pushop):
603 603 ui = pushop.ui
604 604 repo = pushop.repo.unfiltered()
605 605 remote = pushop.remote
606 606 ui.debug("checking for updated bookmarks\n")
607 607 ancestors = ()
608 608 if pushop.revs:
609 609 revnums = map(repo.changelog.rev, pushop.revs)
610 610 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
611 611 remotebookmark = remote.listkeys('bookmarks')
612 612
613 613 explicit = set([repo._bookmarks.expandname(bookmark)
614 614 for bookmark in pushop.bookmarks])
615 615
616 616 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
617 617 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
618 618
619 619 def safehex(x):
620 620 if x is None:
621 621 return x
622 622 return hex(x)
623 623
624 624 def hexifycompbookmarks(bookmarks):
625 625 for b, scid, dcid in bookmarks:
626 626 yield b, safehex(scid), safehex(dcid)
627 627
628 628 comp = [hexifycompbookmarks(marks) for marks in comp]
629 629 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
630 630
631 631 for b, scid, dcid in advsrc:
632 632 if b in explicit:
633 633 explicit.remove(b)
634 634 if not ancestors or repo[scid].rev() in ancestors:
635 635 pushop.outbookmarks.append((b, dcid, scid))
636 636 # search for added bookmarks
637 637 for b, scid, dcid in addsrc:
638 638 if b in explicit:
639 639 explicit.remove(b)
640 640 pushop.outbookmarks.append((b, '', scid))
641 641 # search for overwritten bookmark
642 642 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
643 643 if b in explicit:
644 644 explicit.remove(b)
645 645 pushop.outbookmarks.append((b, dcid, scid))
646 646 # search for bookmark to delete
647 647 for b, scid, dcid in adddst:
648 648 if b in explicit:
649 649 explicit.remove(b)
650 650 # treat as "deleted locally"
651 651 pushop.outbookmarks.append((b, dcid, ''))
652 652 # identical bookmarks shouldn't get reported
653 653 for b, scid, dcid in same:
654 654 if b in explicit:
655 655 explicit.remove(b)
656 656
657 657 if explicit:
658 658 explicit = sorted(explicit)
659 659 # we should probably list all of them
660 660 ui.warn(_('bookmark %s does not exist on the local '
661 661 'or remote repository!\n') % explicit[0])
662 662 pushop.bkresult = 2
663 663
664 664 pushop.outbookmarks.sort()
665 665
666 666 def _pushcheckoutgoing(pushop):
667 667 outgoing = pushop.outgoing
668 668 unfi = pushop.repo.unfiltered()
669 669 if not outgoing.missing:
670 670 # nothing to push
671 671 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
672 672 return False
673 673 # something to push
674 674 if not pushop.force:
675 675 # if repo.obsstore is false (empty) --> no obsolete markers,
676 676 # so we save the iteration
677 677 if unfi.obsstore:
678 678 # these messages are defined here for 80-char-limit reasons
679 679 mso = _("push includes obsolete changeset: %s!")
680 680 mst = {"unstable": _("push includes unstable changeset: %s!"),
681 681 "bumped": _("push includes bumped changeset: %s!"),
682 682 "divergent": _("push includes divergent changeset: %s!")}
683 683 # If we are pushing, and there is at least one
684 684 # obsolete or unstable changeset in missing, then at
685 685 # least one of the missingheads will be obsolete or
686 686 # unstable. So checking heads only is ok
687 687 for node in outgoing.missingheads:
688 688 ctx = unfi[node]
689 689 if ctx.obsolete():
690 690 raise error.Abort(mso % ctx)
691 691 elif ctx.troubled():
692 692 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
693 693
694 694 discovery.checkheads(pushop)
695 695 return True
696 696
697 697 # List of names of steps to perform for an outgoing bundle2, order matters.
698 698 b2partsgenorder = []
699 699
700 700 # Mapping between step name and function
701 701 #
702 702 # This exists to help extensions wrap steps if necessary
703 703 b2partsgenmapping = {}
704 704
705 705 def b2partsgenerator(stepname, idx=None):
706 706 """decorator for function generating bundle2 part
707 707
708 708 The function is added to the step -> function mapping and appended to the
709 709 list of steps. Beware that decorated functions will be added in order
710 710 (this may matter).
711 711
712 712 You can only use this decorator for new steps; if you want to wrap a step
713 713 from an extension, change the b2partsgenmapping dictionary directly."""
714 714 def dec(func):
715 715 assert stepname not in b2partsgenmapping
716 716 b2partsgenmapping[stepname] = func
717 717 if idx is None:
718 718 b2partsgenorder.append(stepname)
719 719 else:
720 720 b2partsgenorder.insert(idx, stepname)
721 721 return func
722 722 return dec
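# Editor's note: as the docstring says, wrapping an existing part generator
# from an extension means swapping its entry in the mapping (sketch; the
# wrapper name is hypothetical):
#
#   origgen = b2partsgenmapping['changeset']
#   def wrappedgen(pushop, bundler):
#       pushop.ui.debug('about to generate the changeset part\n')
#       return origgen(pushop, bundler)
#   b2partsgenmapping['changeset'] = wrappedgen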
723 723
724 724 def _pushb2ctxcheckheads(pushop, bundler):
725 725 """Generate race condition checking parts
726 726
727 727 Exists as an independent function to aid extensions
728 728 """
729 729 # * with 'force', do not check for push races;
730 730 # * if we don't push anything, there is nothing to check.
731 731 if not pushop.force and pushop.outgoing.missingheads:
732 732 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
733 if not allowunrelated:
733 emptyremote = pushop.pushbranchmap is None
734 if not allowunrelated or emptyremote:
734 735 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
735 736 else:
736 737 affected = set()
737 738 for branch, heads in pushop.pushbranchmap.iteritems():
738 739 remoteheads, newheads, unsyncedheads, discardedheads = heads
739 740 if remoteheads is not None:
740 741 remote = set(remoteheads)
741 742 affected |= set(discardedheads) & remote
742 743 affected |= remote - set(newheads)
743 744 if affected:
744 745 data = iter(sorted(affected))
745 746 bundler.newpart('check:updated-heads', data=data)
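# Editor's note on the change above: pushop.pushbranchmap can still be None
# at this point (e.g. on a bare push, where the head-checking logic that
# fills it never ran). Before this fix the 'else' branch would crash trying
# to iterate None; the new 'emptyremote' guard falls back to the legacy
# 'check:heads' part instead.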
746 747
747 748 @b2partsgenerator('changeset')
748 749 def _pushb2ctx(pushop, bundler):
749 750 """handle changegroup push through bundle2
750 751
751 752 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
752 753 """
753 754 if 'changesets' in pushop.stepsdone:
754 755 return
755 756 pushop.stepsdone.add('changesets')
756 757 # Send known heads to the server for race detection.
757 758 if not _pushcheckoutgoing(pushop):
758 759 return
759 760 pushop.repo.prepushoutgoinghooks(pushop)
760 761
761 762 _pushb2ctxcheckheads(pushop, bundler)
762 763
763 764 b2caps = bundle2.bundle2caps(pushop.remote)
764 765 version = '01'
765 766 cgversions = b2caps.get('changegroup')
766 767 if cgversions: # 3.1 and 3.2 ship with an empty value
767 768 cgversions = [v for v in cgversions
768 769 if v in changegroup.supportedoutgoingversions(
769 770 pushop.repo)]
770 771 if not cgversions:
771 772 raise ValueError(_('no common changegroup version'))
772 773 version = max(cgversions)
773 774 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
774 775 pushop.outgoing,
775 776 version=version)
776 777 cgpart = bundler.newpart('changegroup', data=cg)
777 778 if cgversions:
778 779 cgpart.addparam('version', version)
779 780 if 'treemanifest' in pushop.repo.requirements:
780 781 cgpart.addparam('treemanifest', '1')
781 782 def handlereply(op):
782 783 """extract addchangegroup returns from server reply"""
783 784 cgreplies = op.records.getreplies(cgpart.id)
784 785 assert len(cgreplies['changegroup']) == 1
785 786 pushop.cgresult = cgreplies['changegroup'][0]['return']
786 787 return handlereply
787 788
788 789 @b2partsgenerator('phase')
789 790 def _pushb2phases(pushop, bundler):
790 791 """handle phase push through bundle2"""
791 792 if 'phases' in pushop.stepsdone:
792 793 return
793 794 b2caps = bundle2.bundle2caps(pushop.remote)
794 795 if not 'pushkey' in b2caps:
795 796 return
796 797 pushop.stepsdone.add('phases')
797 798 part2node = []
798 799
799 800 def handlefailure(pushop, exc):
800 801 targetid = int(exc.partid)
801 802 for partid, node in part2node:
802 803 if partid == targetid:
803 804 raise error.Abort(_('updating %s to public failed') % node)
804 805
805 806 enc = pushkey.encode
806 807 for newremotehead in pushop.outdatedphases:
807 808 part = bundler.newpart('pushkey')
808 809 part.addparam('namespace', enc('phases'))
809 810 part.addparam('key', enc(newremotehead.hex()))
810 811 part.addparam('old', enc(str(phases.draft)))
811 812 part.addparam('new', enc(str(phases.public)))
812 813 part2node.append((part.id, newremotehead))
813 814 pushop.pkfailcb[part.id] = handlefailure
814 815
815 816 def handlereply(op):
816 817 for partid, node in part2node:
817 818 partrep = op.records.getreplies(partid)
818 819 results = partrep['pushkey']
819 820 assert len(results) <= 1
820 821 msg = None
821 822 if not results:
822 823 msg = _('server ignored update of %s to public!\n') % node
823 824 elif not int(results[0]['return']):
824 825 msg = _('updating %s to public failed!\n') % node
825 826 if msg is not None:
826 827 pushop.ui.warn(msg)
827 828 return handlereply
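# Editor's note: each pushkey part built above carries roughly this payload
# (node shortened for illustration; phases.draft is 1, phases.public is 0):
#
#   namespace: 'phases'
#   key:       '6efa171f091b...'   # hex of the head to turn public
#   old:       '1'
#   new:       '0'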
828 829
829 830 @b2partsgenerator('obsmarkers')
830 831 def _pushb2obsmarkers(pushop, bundler):
831 832 if 'obsmarkers' in pushop.stepsdone:
832 833 return
833 834 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
834 835 if obsolete.commonversion(remoteversions) is None:
835 836 return
836 837 pushop.stepsdone.add('obsmarkers')
837 838 if pushop.outobsmarkers:
838 839 markers = sorted(pushop.outobsmarkers)
839 840 bundle2.buildobsmarkerspart(bundler, markers)
840 841
841 842 @b2partsgenerator('bookmarks')
842 843 def _pushb2bookmarks(pushop, bundler):
843 844 """handle bookmark push through bundle2"""
844 845 if 'bookmarks' in pushop.stepsdone:
845 846 return
846 847 b2caps = bundle2.bundle2caps(pushop.remote)
847 848 if 'pushkey' not in b2caps:
848 849 return
849 850 pushop.stepsdone.add('bookmarks')
850 851 part2book = []
851 852 enc = pushkey.encode
852 853
853 854 def handlefailure(pushop, exc):
854 855 targetid = int(exc.partid)
855 856 for partid, book, action in part2book:
856 857 if partid == targetid:
857 858 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
858 859 # we should not be called for parts we did not generate
859 860 assert False
860 861
861 862 for book, old, new in pushop.outbookmarks:
862 863 part = bundler.newpart('pushkey')
863 864 part.addparam('namespace', enc('bookmarks'))
864 865 part.addparam('key', enc(book))
865 866 part.addparam('old', enc(old))
866 867 part.addparam('new', enc(new))
867 868 action = 'update'
868 869 if not old:
869 870 action = 'export'
870 871 elif not new:
871 872 action = 'delete'
872 873 part2book.append((part.id, book, action))
873 874 pushop.pkfailcb[part.id] = handlefailure
874 875
875 876 def handlereply(op):
876 877 ui = pushop.ui
877 878 for partid, book, action in part2book:
878 879 partrep = op.records.getreplies(partid)
879 880 results = partrep['pushkey']
880 881 assert len(results) <= 1
881 882 if not results:
882 883 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
883 884 else:
884 885 ret = int(results[0]['return'])
885 886 if ret:
886 887 ui.status(bookmsgmap[action][0] % book)
887 888 else:
888 889 ui.warn(bookmsgmap[action][1] % book)
889 890 if pushop.bkresult is not None:
890 891 pushop.bkresult = 1
891 892 return handlereply
892 893
893 894
894 895 def _pushbundle2(pushop):
895 896 """push data to the remote using bundle2
896 897
897 898 The only currently supported type of data is changegroup but this will
898 899 evolve in the future."""
899 900 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
900 901 pushback = (pushop.trmanager
901 902 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
902 903
903 904 # create reply capability
904 905 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
905 906 allowpushback=pushback))
906 907 bundler.newpart('replycaps', data=capsblob)
907 908 replyhandlers = []
908 909 for partgenname in b2partsgenorder:
909 910 partgen = b2partsgenmapping[partgenname]
910 911 ret = partgen(pushop, bundler)
911 912 if callable(ret):
912 913 replyhandlers.append(ret)
913 914 # do not push if nothing to push
914 915 if bundler.nbparts <= 1:
915 916 return
916 917 stream = util.chunkbuffer(bundler.getchunks())
917 918 try:
918 919 try:
919 920 reply = pushop.remote.unbundle(
920 921 stream, ['force'], pushop.remote.url())
921 922 except error.BundleValueError as exc:
922 923 raise error.Abort(_('missing support for %s') % exc)
923 924 try:
924 925 trgetter = None
925 926 if pushback:
926 927 trgetter = pushop.trmanager.transaction
927 928 op = bundle2.processbundle(pushop.repo, reply, trgetter)
928 929 except error.BundleValueError as exc:
929 930 raise error.Abort(_('missing support for %s') % exc)
930 931 except bundle2.AbortFromPart as exc:
931 932 pushop.ui.status(_('remote: %s\n') % exc)
932 933 if exc.hint is not None:
933 934 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
934 935 raise error.Abort(_('push failed on remote'))
935 936 except error.PushkeyFailed as exc:
936 937 partid = int(exc.partid)
937 938 if partid not in pushop.pkfailcb:
938 939 raise
939 940 pushop.pkfailcb[partid](pushop, exc)
940 941 for rephand in replyhandlers:
941 942 rephand(op)
942 943
943 944 def _pushchangeset(pushop):
944 945 """Make the actual push of changeset bundle to remote repo"""
945 946 if 'changesets' in pushop.stepsdone:
946 947 return
947 948 pushop.stepsdone.add('changesets')
948 949 if not _pushcheckoutgoing(pushop):
949 950 return
950 951 pushop.repo.prepushoutgoinghooks(pushop)
951 952 outgoing = pushop.outgoing
952 953 unbundle = pushop.remote.capable('unbundle')
953 954 # TODO: get bundlecaps from remote
954 955 bundlecaps = None
955 956 # create a changegroup from local
956 957 if pushop.revs is None and not (outgoing.excluded
957 958 or pushop.repo.changelog.filteredrevs):
958 959 # push everything,
959 960 # use the fast path, no race possible on push
960 961 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
961 962 cg = changegroup.getsubset(pushop.repo,
962 963 outgoing,
963 964 bundler,
964 965 'push',
965 966 fastpath=True)
966 967 else:
967 968 cg = changegroup.getchangegroup(pushop.repo, 'push', outgoing,
968 969 bundlecaps=bundlecaps)
969 970
970 971 # apply changegroup to remote
971 972 if unbundle:
972 973 # local repo finds heads on server, finds out what
973 974 # revs it must push. once revs transferred, if server
974 975 # finds it has different heads (someone else won
975 976 # commit/push race), server aborts.
976 977 if pushop.force:
977 978 remoteheads = ['force']
978 979 else:
979 980 remoteheads = pushop.remoteheads
980 981 # ssh: return remote's addchangegroup()
981 982 # http: return remote's addchangegroup() or 0 for error
982 983 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
983 984 pushop.repo.url())
984 985 else:
985 986 # we return an integer indicating remote head count
986 987 # change
987 988 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
988 989 pushop.repo.url())
989 990
990 991 def _pushsyncphase(pushop):
991 992 """synchronise phase information locally and remotely"""
992 993 cheads = pushop.commonheads
993 994 # even when we don't push, exchanging phase data is useful
994 995 remotephases = pushop.remote.listkeys('phases')
995 996 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
996 997 and remotephases # server supports phases
997 998 and pushop.cgresult is None # nothing was pushed
998 999 and remotephases.get('publishing', False)):
999 1000 # When:
1000 1001 # - this is a subrepo push
1001 1002 # - and the remote supports phases
1002 1003 # - and no changeset was pushed
1003 1004 # - and the remote is publishing
1004 1005 # We may be in the issue 3871 case!
1005 1006 # We drop the phase synchronisation that would otherwise
1006 1007 # be done as a courtesy; it could publish on the remote
1007 1008 # changesets that are possibly still draft locally.
1008 1009 remotephases = {'publishing': 'True'}
1009 1010 if not remotephases: # old server or public-only reply from non-publishing
1010 1011 _localphasemove(pushop, cheads)
1011 1012 # don't push any phase data as there is nothing to push
1012 1013 else:
1013 1014 ana = phases.analyzeremotephases(pushop.repo, cheads,
1014 1015 remotephases)
1015 1016 pheads, droots = ana
1016 1017 ### Apply remote phase on local
1017 1018 if remotephases.get('publishing', False):
1018 1019 _localphasemove(pushop, cheads)
1019 1020 else: # publish = False
1020 1021 _localphasemove(pushop, pheads)
1021 1022 _localphasemove(pushop, cheads, phases.draft)
1022 1023 ### Apply local phase on remote
1023 1024
1024 1025 if pushop.cgresult:
1025 1026 if 'phases' in pushop.stepsdone:
1026 1027 # phases already pushed through bundle2
1027 1028 return
1028 1029 outdated = pushop.outdatedphases
1029 1030 else:
1030 1031 outdated = pushop.fallbackoutdatedphases
1031 1032
1032 1033 pushop.stepsdone.add('phases')
1033 1034
1034 1035 # filter heads already turned public by the push
1035 1036 outdated = [c for c in outdated if c.node() not in pheads]
1036 1037 # fallback to independent pushkey command
1037 1038 for newremotehead in outdated:
1038 1039 r = pushop.remote.pushkey('phases',
1039 1040 newremotehead.hex(),
1040 1041 str(phases.draft),
1041 1042 str(phases.public))
1042 1043 if not r:
1043 1044 pushop.ui.warn(_('updating %s to public failed!\n')
1044 1045 % newremotehead)
1045 1046
1046 1047 def _localphasemove(pushop, nodes, phase=phases.public):
1047 1048 """move <nodes> to <phase> in the local source repo"""
1048 1049 if pushop.trmanager:
1049 1050 phases.advanceboundary(pushop.repo,
1050 1051 pushop.trmanager.transaction(),
1051 1052 phase,
1052 1053 nodes)
1053 1054 else:
1054 1055 # repo is not locked, do not change any phases!
1055 1056 # Inform the user that phases should have been moved when
1056 1057 # applicable.
1057 1058 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1058 1059 phasestr = phases.phasenames[phase]
1059 1060 if actualmoves:
1060 1061 pushop.ui.status(_('cannot lock source repo, skipping '
1061 1062 'local %s phase update\n') % phasestr)
1062 1063
1063 1064 def _pushobsolete(pushop):
1064 1065 """utility function to push obsolete markers to a remote"""
1065 1066 if 'obsmarkers' in pushop.stepsdone:
1066 1067 return
1067 1068 repo = pushop.repo
1068 1069 remote = pushop.remote
1069 1070 pushop.stepsdone.add('obsmarkers')
1070 1071 if pushop.outobsmarkers:
1071 1072 pushop.ui.debug('try to push obsolete markers to remote\n')
1072 1073 rslts = []
1073 1074 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1074 1075 for key in sorted(remotedata, reverse=True):
1075 1076 # reverse sort to ensure we end with dump0
1076 1077 data = remotedata[key]
1077 1078 rslts.append(remote.pushkey('obsolete', key, '', data))
1078 1079 if [r for r in rslts if not r]:
1079 1080 msg = _('failed to push some obsolete markers!\n')
1080 1081 repo.ui.warn(msg)
1081 1082
1082 1083 def _pushbookmark(pushop):
1083 1084 """Update bookmark position on remote"""
1084 1085 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1085 1086 return
1086 1087 pushop.stepsdone.add('bookmarks')
1087 1088 ui = pushop.ui
1088 1089 remote = pushop.remote
1089 1090
1090 1091 for b, old, new in pushop.outbookmarks:
1091 1092 action = 'update'
1092 1093 if not old:
1093 1094 action = 'export'
1094 1095 elif not new:
1095 1096 action = 'delete'
1096 1097 if remote.pushkey('bookmarks', b, old, new):
1097 1098 ui.status(bookmsgmap[action][0] % b)
1098 1099 else:
1099 1100 ui.warn(bookmsgmap[action][1] % b)
1100 1101 # discovery may have set the value from an invalid entry
1101 1102 if pushop.bkresult is not None:
1102 1103 pushop.bkresult = 1
1103 1104
1104 1105 class pulloperation(object):
1105 1106 """A object that represent a single pull operation
1106 1107
1107 1108 It purpose is to carry pull related state and very common operation.
1108 1109
1109 1110 A new should be created at the beginning of each pull and discarded
1110 1111 afterward.
1111 1112 """
1112 1113
1113 1114 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1114 1115 remotebookmarks=None, streamclonerequested=None):
1115 1116 # repo we pull into
1116 1117 self.repo = repo
1117 1118 # repo we pull from
1118 1119 self.remote = remote
1119 1120 # revision we try to pull (None is "all")
1120 1121 self.heads = heads
1121 1122 # bookmark pulled explicitly
1122 1123 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1123 1124 for bookmark in bookmarks]
1124 1125 # do we force pull?
1125 1126 self.force = force
1126 1127 # whether a streaming clone was requested
1127 1128 self.streamclonerequested = streamclonerequested
1128 1129 # transaction manager
1129 1130 self.trmanager = None
1130 1131 # set of common changesets between local and remote before pull
1131 1132 self.common = None
1132 1133 # set of pulled heads
1133 1134 self.rheads = None
1134 1135 # list of missing changesets to fetch remotely
1135 1136 self.fetch = None
1136 1137 # remote bookmarks data
1137 1138 self.remotebookmarks = remotebookmarks
1138 1139 # result of changegroup pulling (used as return code by pull)
1139 1140 self.cgresult = None
1140 1141 # list of steps already done
1141 1142 self.stepsdone = set()
1142 1143 # Whether we attempted a clone from pre-generated bundles.
1143 1144 self.clonebundleattempted = False
1144 1145
1145 1146 @util.propertycache
1146 1147 def pulledsubset(self):
1147 1148 """heads of the set of changeset target by the pull"""
1148 1149 # compute target subset
1149 1150 if self.heads is None:
1150 1151 # We pulled everything possible
1151 1152 # sync on everything common
1152 1153 c = set(self.common)
1153 1154 ret = list(self.common)
1154 1155 for n in self.rheads:
1155 1156 if n not in c:
1156 1157 ret.append(n)
1157 1158 return ret
1158 1159 else:
1159 1160 # We pulled a specific subset
1160 1161 # sync on this subset
1161 1162 return self.heads
1162 1163
1163 1164 @util.propertycache
1164 1165 def canusebundle2(self):
1165 1166 return not _forcebundle1(self)
1166 1167
1167 1168 @util.propertycache
1168 1169 def remotebundle2caps(self):
1169 1170 return bundle2.bundle2caps(self.remote)
1170 1171
1171 1172 def gettransaction(self):
1172 1173 # deprecated; talk to trmanager directly
1173 1174 return self.trmanager.transaction()
1174 1175
1175 1176 class transactionmanager(object):
1176 1177 """An object to manage the life cycle of a transaction
1177 1178
1178 1179 It creates the transaction on demand and calls the appropriate hooks when
1179 1180 closing the transaction."""
1180 1181 def __init__(self, repo, source, url):
1181 1182 self.repo = repo
1182 1183 self.source = source
1183 1184 self.url = url
1184 1185 self._tr = None
1185 1186
1186 1187 def transaction(self):
1187 1188 """Return an open transaction object, constructing if necessary"""
1188 1189 if not self._tr:
1189 1190 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1190 1191 self._tr = self.repo.transaction(trname)
1191 1192 self._tr.hookargs['source'] = self.source
1192 1193 self._tr.hookargs['url'] = self.url
1193 1194 return self._tr
1194 1195
1195 1196 def close(self):
1196 1197 """close transaction if created"""
1197 1198 if self._tr is not None:
1198 1199 self._tr.close()
1199 1200
1200 1201 def release(self):
1201 1202 """release transaction if created"""
1202 1203 if self._tr is not None:
1203 1204 self._tr.release()
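# Editor's note: the typical life cycle, mirroring how push() and pull()
# above use this class:
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   try:
#       ...                    # steps call trmanager.transaction() on demand
#       trmanager.close()      # commit, if a transaction was ever opened
#   finally:
#       trmanager.release()    # rollback if close() was never reached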
1204 1205
1205 1206 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1206 1207 streamclonerequested=None):
1207 1208 """Fetch repository data from a remote.
1208 1209
1209 1210 This is the main function used to retrieve data from a remote repository.
1210 1211
1211 1212 ``repo`` is the local repository to clone into.
1212 1213 ``remote`` is a peer instance.
1213 1214 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1214 1215 default) means to pull everything from the remote.
1215 1216 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1216 1217 default, all remote bookmarks are pulled.
1217 1218 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1218 1219 initialization.
1219 1220 ``streamclonerequested`` is a boolean indicating whether a "streaming
1220 1221 clone" is requested. A "streaming clone" is essentially a raw file copy
1221 1222 of revlogs from the server. This only works when the local repository is
1222 1223 empty. The default value of ``None`` means to respect the server
1223 1224 configuration for preferring stream clones.
1224 1225
1225 1226 Returns the ``pulloperation`` created for this pull.
1226 1227 """
1227 1228 if opargs is None:
1228 1229 opargs = {}
1229 1230 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1230 1231 streamclonerequested=streamclonerequested, **opargs)
1231 1232 if pullop.remote.local():
1232 1233 missing = set(pullop.remote.requirements) - pullop.repo.supported
1233 1234 if missing:
1234 1235 msg = _("required features are not"
1235 1236 " supported in the destination:"
1236 1237 " %s") % (', '.join(sorted(missing)))
1237 1238 raise error.Abort(msg)
1238 1239
1239 1240 wlock = lock = None
1240 1241 try:
1241 1242 wlock = pullop.repo.wlock()
1242 1243 lock = pullop.repo.lock()
1243 1244 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1244 1245 streamclone.maybeperformlegacystreamclone(pullop)
1245 1246 # This should ideally be in _pullbundle2(). However, it needs to run
1246 1247 # before discovery to avoid extra work.
1247 1248 _maybeapplyclonebundle(pullop)
1248 1249 _pulldiscovery(pullop)
1249 1250 if pullop.canusebundle2:
1250 1251 _pullbundle2(pullop)
1251 1252 _pullchangeset(pullop)
1252 1253 _pullphase(pullop)
1253 1254 _pullbookmarks(pullop)
1254 1255 _pullobsolete(pullop)
1255 1256 pullop.trmanager.close()
1256 1257 finally:
1257 1258 lockmod.release(pullop.trmanager, lock, wlock)
1258 1259
1259 1260 return pullop
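# Editor's note: a minimal caller sketch; 'other' is assumed to be an
# already opened peer (setup elided):
#
#   pullop = pull(repo, other)
#   if pullop.cgresult == 0:
#       ...  # no changes were found on the remote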
1260 1261
1261 1262 # list of steps to perform discovery before pull
1262 1263 pulldiscoveryorder = []
1263 1264
1264 1265 # Mapping between step name and function
1265 1266 #
1266 1267 # This exists to help extensions wrap steps if necessary
1267 1268 pulldiscoverymapping = {}
1268 1269
1269 1270 def pulldiscovery(stepname):
1270 1271 """decorator for function performing discovery before pull
1271 1272
1272 1273 The function is added to the step -> function mapping and appended to the
1273 1274 list of steps. Beware that decorated functions will be added in order (this
1274 1275 may matter).
1275 1276
1276 1277 You can only use this decorator for a new step; if you want to wrap a step
1277 1278 from an extension, change the pulldiscoverymapping dictionary directly."""
1278 1279 def dec(func):
1279 1280 assert stepname not in pulldiscoverymapping
1280 1281 pulldiscoverymapping[stepname] = func
1281 1282 pulldiscoveryorder.append(stepname)
1282 1283 return func
1283 1284 return dec
1284 1285
1285 1286 def _pulldiscovery(pullop):
1286 1287 """Run all discovery steps"""
1287 1288 for stepname in pulldiscoveryorder:
1288 1289 step = pulldiscoverymapping[stepname]
1289 1290 step(pullop)
1290 1291
1291 1292 @pulldiscovery('b1:bookmarks')
1292 1293 def _pullbookmarkbundle1(pullop):
1293 1294 """fetch bookmark data in bundle1 case
1294 1295
1295 1296 If not using bundle2, we have to fetch bookmarks before changeset
1296 1297 discovery to reduce the chance and impact of race conditions."""
1297 1298 if pullop.remotebookmarks is not None:
1298 1299 return
1299 1300 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1300 1301 # all known bundle2 servers now support listkeys, but let's be nice with
1301 1302 # new implementations.
1302 1303 return
1303 1304 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1304 1305
1305 1306
1306 1307 @pulldiscovery('changegroup')
1307 1308 def _pulldiscoverychangegroup(pullop):
1308 1309 """discovery phase for the pull
1309 1310
1310 1311 Currently handles changeset discovery only; will change to handle all
1311 1312 discovery at some point."""
1312 1313 tmp = discovery.findcommonincoming(pullop.repo,
1313 1314 pullop.remote,
1314 1315 heads=pullop.heads,
1315 1316 force=pullop.force)
1316 1317 common, fetch, rheads = tmp
1317 1318 nm = pullop.repo.unfiltered().changelog.nodemap
1318 1319 if fetch and rheads:
1319 1320 # If a remote head is filtered locally, let's drop it from the unknown
1320 1321 # remote heads and put it back in common.
1321 1322 #
1322 1323 # This is a hackish solution to catch most "common but locally
1323 1324 # hidden" situations. We do not perform discovery on the unfiltered
1324 1325 # repository because it ends up doing a pathological number of round
1325 1326 # trips for a huge amount of changesets we do not care about.
1326 1327 #
1327 1328 # If a set of such "common but filtered" changesets exists on the server
1328 1329 # but does not include a remote head, we'll not be able to detect it,
1329 1330 scommon = set(common)
1330 1331 filteredrheads = []
1331 1332 for n in rheads:
1332 1333 if n in nm:
1333 1334 if n not in scommon:
1334 1335 common.append(n)
1335 1336 else:
1336 1337 filteredrheads.append(n)
1337 1338 if not filteredrheads:
1338 1339 fetch = []
1339 1340 rheads = filteredrheads
1340 1341 pullop.common = common
1341 1342 pullop.fetch = fetch
1342 1343 pullop.rheads = rheads
1343 1344
1344 1345 def _pullbundle2(pullop):
1345 1346 """pull data using bundle2
1346 1347
1347 1348 For now, the only supported data is the changegroup."""
1348 1349 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1349 1350
1350 1351 # At the moment we don't do stream clones over bundle2. If that is
1351 1352 # implemented then here's where the check for that will go.
1352 1353 streaming = False
1353 1354
1354 1355 # pulling changegroup
1355 1356 pullop.stepsdone.add('changegroup')
1356 1357
1357 1358 kwargs['common'] = pullop.common
1358 1359 kwargs['heads'] = pullop.heads or pullop.rheads
1359 1360 kwargs['cg'] = pullop.fetch
1360 1361 if 'listkeys' in pullop.remotebundle2caps:
1361 1362 kwargs['listkeys'] = ['phases']
1362 1363 if pullop.remotebookmarks is None:
1363 1364 # make sure to always include bookmark data when migrating
1364 1365 # `hg incoming --bundle` to using this function.
1365 1366 kwargs['listkeys'].append('bookmarks')
1366 1367
1367 1368 # If this is a full pull / clone and the server supports the clone bundles
1368 1369 # feature, tell the server whether we attempted a clone bundle. The
1369 1370 # presence of this flag indicates the client supports clone bundles. This
1370 1371 # will enable the server to treat clients that support clone bundles
1371 1372 # differently from those that don't.
1372 1373 if (pullop.remote.capable('clonebundles')
1373 1374 and pullop.heads is None and list(pullop.common) == [nullid]):
1374 1375 kwargs['cbattempted'] = pullop.clonebundleattempted
1375 1376
1376 1377 if streaming:
1377 1378 pullop.repo.ui.status(_('streaming all changes\n'))
1378 1379 elif not pullop.fetch:
1379 1380 pullop.repo.ui.status(_("no changes found\n"))
1380 1381 pullop.cgresult = 0
1381 1382 else:
1382 1383 if pullop.heads is None and list(pullop.common) == [nullid]:
1383 1384 pullop.repo.ui.status(_("requesting all changes\n"))
1384 1385 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1385 1386 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1386 1387 if obsolete.commonversion(remoteversions) is not None:
1387 1388 kwargs['obsmarkers'] = True
1388 1389 pullop.stepsdone.add('obsmarkers')
1389 1390 _pullbundle2extraprepare(pullop, kwargs)
1390 1391 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1391 1392 try:
1392 1393 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1393 1394 except bundle2.AbortFromPart as exc:
1394 1395 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1395 1396 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1396 1397 except error.BundleValueError as exc:
1397 1398 raise error.Abort(_('missing support for %s') % exc)
1398 1399
1399 1400 if pullop.fetch:
1400 1401 pullop.cgresult = bundle2.combinechangegroupresults(op)
1401 1402
1402 1403 # processing phases change
1403 1404 for namespace, value in op.records['listkeys']:
1404 1405 if namespace == 'phases':
1405 1406 _pullapplyphases(pullop, value)
1406 1407
1407 1408 # processing bookmark update
1408 1409 for namespace, value in op.records['listkeys']:
1409 1410 if namespace == 'bookmarks':
1410 1411 pullop.remotebookmarks = value
1411 1412
1412 1413 # bookmark data were either already there or pulled in the bundle
1413 1414 if pullop.remotebookmarks is not None:
1414 1415 _pullbookmarks(pullop)
1415 1416
1416 1417 def _pullbundle2extraprepare(pullop, kwargs):
1417 1418 """hook function so that extensions can extend the getbundle call"""
1418 1419 pass
1419 1420
1420 1421 def _pullchangeset(pullop):
1421 1422 """pull changeset from unbundle into the local repo"""
1422 1423 # We delay opening the transaction as long as possible so we
1423 1424 # don't open a transaction for nothing, and don't break future
1424 1425 # useful rollback calls
1425 1426 if 'changegroup' in pullop.stepsdone:
1426 1427 return
1427 1428 pullop.stepsdone.add('changegroup')
1428 1429 if not pullop.fetch:
1429 1430 pullop.repo.ui.status(_("no changes found\n"))
1430 1431 pullop.cgresult = 0
1431 1432 return
1432 1433 tr = pullop.gettransaction()
1433 1434 if pullop.heads is None and list(pullop.common) == [nullid]:
1434 1435 pullop.repo.ui.status(_("requesting all changes\n"))
1435 1436 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1436 1437 # issue1320, avoid a race if remote changed after discovery
1437 1438 pullop.heads = pullop.rheads
1438 1439
1439 1440 if pullop.remote.capable('getbundle'):
1440 1441 # TODO: get bundlecaps from remote
1441 1442 cg = pullop.remote.getbundle('pull', common=pullop.common,
1442 1443 heads=pullop.heads or pullop.rheads)
1443 1444 elif pullop.heads is None:
1444 1445 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1445 1446 elif not pullop.remote.capable('changegroupsubset'):
1446 1447 raise error.Abort(_("partial pull cannot be done because "
1447 1448 "other repository doesn't support "
1448 1449 "changegroupsubset."))
1449 1450 else:
1450 1451 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1451 1452 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1452 1453 pullop.remote.url())
1453 1454 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1454 1455
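The branch above encodes a capability-based fallback chain. A standalone sketch of just the dispatch order, with a stand-in capability test (the names match the real wire-protocol commands, the rest is illustrative):

    def pick_method(capable, heads):
        # newest protocol first, then the legacy full/partial pull paths
        if capable('getbundle'):
            return 'getbundle'
        if heads is None:
            return 'changegroup'        # full pull over the old protocol
        if not capable('changegroupsubset'):
            raise RuntimeError('partial pull unsupported by remote')
        return 'changegroupsubset'

    assert pick_method({'getbundle'}.__contains__, None) == 'getbundle'
    assert pick_method(set().__contains__, None) == 'changegroup'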
1455 1456 def _pullphase(pullop):
1456 1457 # Get remote phases data from remote
1457 1458 if 'phases' in pullop.stepsdone:
1458 1459 return
1459 1460 remotephases = pullop.remote.listkeys('phases')
1460 1461 _pullapplyphases(pullop, remotephases)
1461 1462
1462 1463 def _pullapplyphases(pullop, remotephases):
1463 1464 """apply phase movement from observed remote state"""
1464 1465 if 'phases' in pullop.stepsdone:
1465 1466 return
1466 1467 pullop.stepsdone.add('phases')
1467 1468 publishing = bool(remotephases.get('publishing', False))
1468 1469 if remotephases and not publishing:
1469 1470 # remote is new and non-publishing
1470 1471 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1471 1472 pullop.pulledsubset,
1472 1473 remotephases)
1473 1474 dheads = pullop.pulledsubset
1474 1475 else:
1475 1476 # Remote is old or publishing; all common changesets
1476 1477 # should be seen as public
1477 1478 pheads = pullop.pulledsubset
1478 1479 dheads = []
1479 1480 unfi = pullop.repo.unfiltered()
1480 1481 phase = unfi._phasecache.phase
1481 1482 rev = unfi.changelog.nodemap.get
1482 1483 public = phases.public
1483 1484 draft = phases.draft
1484 1485
1485 1486 # exclude changesets already public locally and update the others
1486 1487 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1487 1488 if pheads:
1488 1489 tr = pullop.gettransaction()
1489 1490 phases.advanceboundary(pullop.repo, tr, public, pheads)
1490 1491
1491 1492 # exclude changesets already draft locally and update the others
1492 1493 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1493 1494 if dheads:
1494 1495 tr = pullop.gettransaction()
1495 1496 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1496 1497
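A toy model of the boundary advance above, assuming phases are small integers that only ever move toward public; the nodes and phase values are illustrative:

    public, draft = 0, 1                  # mirrors phases.public/draft
    local_phase = {'n1': draft, 'n2': public}
    pheads = ['n1', 'n2']                 # heads the remote reports public
    # exclude changesets already public locally, exactly as above
    pheads = [n for n in pheads if local_phase[n] > public]
    assert pheads == ['n1']               # only n1 still needs advancing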
1497 1498 def _pullbookmarks(pullop):
1498 1499 """process the remote bookmark information to update the local one"""
1499 1500 if 'bookmarks' in pullop.stepsdone:
1500 1501 return
1501 1502 pullop.stepsdone.add('bookmarks')
1502 1503 repo = pullop.repo
1503 1504 remotebookmarks = pullop.remotebookmarks
1504 1505 remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
1505 1506 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1506 1507 pullop.remote.url(),
1507 1508 pullop.gettransaction,
1508 1509 explicit=pullop.explicitbookmarks)
1509 1510
1510 1511 def _pullobsolete(pullop):
1511 1512 """utility function to pull obsolete markers from a remote
1512 1513
1513 1514 `gettransaction` is a function that returns the pull transaction, creating
1514 1515 one if necessary. We return the transaction to inform the calling code that
1515 1516 a new transaction has been created (when applicable).
1516 1517
1517 1518 Exists mostly to allow overriding for experimentation purposes"""
1518 1519 if 'obsmarkers' in pullop.stepsdone:
1519 1520 return
1520 1521 pullop.stepsdone.add('obsmarkers')
1521 1522 tr = None
1522 1523 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1523 1524 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1524 1525 remoteobs = pullop.remote.listkeys('obsolete')
1525 1526 if 'dump0' in remoteobs:
1526 1527 tr = pullop.gettransaction()
1527 1528 markers = []
1528 1529 for key in sorted(remoteobs, reverse=True):
1529 1530 if key.startswith('dump'):
1530 1531 data = util.b85decode(remoteobs[key])
1531 1532 version, newmarks = obsolete._readmarkers(data)
1532 1533 markers += newmarks
1533 1534 if markers:
1534 1535 pullop.repo.obsstore.add(tr, markers)
1535 1536 pullop.repo.invalidatevolatilesets()
1536 1537 return tr
1537 1538
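The loop above assumes a pushkey reply where each 'dumpN' key carries a base85-encoded blob of markers. A standalone sketch using the stdlib codec, which to my knowledge matches Mercurial's git-style base85; the payload is a placeholder:

    import base64

    remoteobs = {'dump0': base64.b85encode(b'raw-marker-bytes')}
    blobs = [base64.b85decode(remoteobs[key])
             for key in sorted(remoteobs, reverse=True)
             if key.startswith('dump')]
    assert blobs == [b'raw-marker-bytes']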
1538 1539 def caps20to10(repo):
1539 1540 """return a set with appropriate options to use bundle20 during getbundle"""
1540 1541 caps = {'HG20'}
1541 1542 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1542 1543 caps.add('bundle2=' + urlreq.quote(capsblob))
1543 1544 return caps
1544 1545
1545 1546 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1546 1547 getbundle2partsorder = []
1547 1548
1548 1549 # Mapping between step name and function
1549 1550 #
1550 1551 # This exists to help extensions wrap steps if necessary
1551 1552 getbundle2partsmapping = {}
1552 1553
1553 1554 def getbundle2partsgenerator(stepname, idx=None):
1554 1555 """decorator for function generating bundle2 part for getbundle
1555 1556
1556 1557 The function is added to the step -> function mapping and appended to the
1557 1558 list of steps. Beware that decorated functions will be added in order
1558 1559 (this may matter).
1559 1560
1560 1561 You can only use this decorator for new steps; to wrap a step from an
1561 1562 extension, modify the getbundle2partsmapping dictionary directly."""
1562 1563 def dec(func):
1563 1564 assert stepname not in getbundle2partsmapping
1564 1565 getbundle2partsmapping[stepname] = func
1565 1566 if idx is None:
1566 1567 getbundle2partsorder.append(stepname)
1567 1568 else:
1568 1569 getbundle2partsorder.insert(idx, stepname)
1569 1570 return func
1570 1571 return dec
1571 1572
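A sketch of what registration looks like in practice; the step name, part type, and payload below are invented:

    @getbundle2partsgenerator('myext-stats')
    def _getbundlestatspart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
        # emit an advisory part; clients that don't know it will skip it
        bundler.newpart('myext:stats', data=b'...', mandatory=False)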
1572 1573 def bundle2requested(bundlecaps):
1573 1574 if bundlecaps is not None:
1574 1575 return any(cap.startswith('HG2') for cap in bundlecaps)
1575 1576 return False
1576 1577
1577 1578 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1578 1579 **kwargs):
1579 1580 """Return chunks constituting a bundle's raw data.
1580 1581
1581 1582 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1582 1583 passed.
1583 1584
1584 1585 Returns an iterator over raw chunks (of varying sizes).
1585 1586 """
1586 1587 kwargs = pycompat.byteskwargs(kwargs)
1587 1588 usebundle2 = bundle2requested(bundlecaps)
1588 1589 # bundle10 case
1589 1590 if not usebundle2:
1590 1591 if bundlecaps and not kwargs.get('cg', True):
1591 1592 raise ValueError(_('request for bundle10 must include changegroup'))
1592 1593
1593 1594 if kwargs:
1594 1595 raise ValueError(_('unsupported getbundle arguments: %s')
1595 1596 % ', '.join(sorted(kwargs.keys())))
1596 1597 outgoing = _computeoutgoing(repo, heads, common)
1597 1598 bundler = changegroup.getbundler('01', repo, bundlecaps)
1598 1599 return changegroup.getsubsetraw(repo, outgoing, bundler, source)
1599 1600
1600 1601 # bundle20 case
1601 1602 b2caps = {}
1602 1603 for bcaps in bundlecaps:
1603 1604 if bcaps.startswith('bundle2='):
1604 1605 blob = urlreq.unquote(bcaps[len('bundle2='):])
1605 1606 b2caps.update(bundle2.decodecaps(blob))
1606 1607 bundler = bundle2.bundle20(repo.ui, b2caps)
1607 1608
1608 1609 kwargs['heads'] = heads
1609 1610 kwargs['common'] = common
1610 1611
1611 1612 for name in getbundle2partsorder:
1612 1613 func = getbundle2partsmapping[name]
1613 1614 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1614 1615 **pycompat.strkwargs(kwargs))
1615 1616
1616 1617 return bundler.getchunks()
1617 1618
1618 1619 @getbundle2partsgenerator('changegroup')
1619 1620 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1620 1621 b2caps=None, heads=None, common=None, **kwargs):
1621 1622 """add a changegroup part to the requested bundle"""
1622 1623 cg = None
1623 1624 if kwargs.get('cg', True):
1624 1625 # build changegroup bundle here.
1625 1626 version = '01'
1626 1627 cgversions = b2caps.get('changegroup')
1627 1628 if cgversions: # 3.1 and 3.2 ship with an empty value
1628 1629 cgversions = [v for v in cgversions
1629 1630 if v in changegroup.supportedoutgoingversions(repo)]
1630 1631 if not cgversions:
1631 1632 raise ValueError(_('no common changegroup version'))
1632 1633 version = max(cgversions)
1633 1634 outgoing = _computeoutgoing(repo, heads, common)
1634 1635 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1635 1636 bundlecaps=bundlecaps,
1636 1637 version=version)
1637 1638
1638 1639 if cg:
1639 1640 part = bundler.newpart('changegroup', data=cg)
1640 1641 if cgversions:
1641 1642 part.addparam('version', version)
1642 1643 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1643 1644 if 'treemanifest' in repo.requirements:
1644 1645 part.addparam('treemanifest', '1')
1645 1646
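The version pick above, in isolation: intersect what the client advertised with what the server can emit, then take the highest. Values are illustrative:

    client = ['01', '02']            # from b2caps.get('changegroup')
    server = {'01', '02', '03'}      # supportedoutgoingversions(repo)
    common = [v for v in client if v in server]
    assert common, 'no common changegroup version'
    assert max(common) == '02'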
1646 1647 @getbundle2partsgenerator('listkeys')
1647 1648 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1648 1649 b2caps=None, **kwargs):
1649 1650 """add parts containing listkeys namespaces to the requested bundle"""
1650 1651 listkeys = kwargs.get('listkeys', ())
1651 1652 for namespace in listkeys:
1652 1653 part = bundler.newpart('listkeys')
1653 1654 part.addparam('namespace', namespace)
1654 1655 keys = repo.listkeys(namespace).items()
1655 1656 part.data = pushkey.encodekeys(keys)
1656 1657
1657 1658 @getbundle2partsgenerator('obsmarkers')
1658 1659 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1659 1660 b2caps=None, heads=None, **kwargs):
1660 1661 """add an obsolescence markers part to the requested bundle"""
1661 1662 if kwargs.get('obsmarkers', False):
1662 1663 if heads is None:
1663 1664 heads = repo.heads()
1664 1665 subset = [c.node() for c in repo.set('::%ln', heads)]
1665 1666 markers = repo.obsstore.relevantmarkers(subset)
1666 1667 markers = sorted(markers)
1667 1668 bundle2.buildobsmarkerspart(bundler, markers)
1668 1669
1669 1670 @getbundle2partsgenerator('hgtagsfnodes')
1670 1671 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1671 1672 b2caps=None, heads=None, common=None,
1672 1673 **kwargs):
1673 1674 """Transfer the .hgtags filenodes mapping.
1674 1675
1675 1676 Only values for heads in this bundle will be transferred.
1676 1677
1677 1678 The part data consists of pairs of 20 byte changeset node and .hgtags
1678 1679 filenodes raw values.
1679 1680 """
1680 1681 # Don't send unless:
1681 1682 # - changesets are being exchanged,
1682 1683 # - the client supports it.
1683 1684 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1684 1685 return
1685 1686
1686 1687 outgoing = _computeoutgoing(repo, heads, common)
1687 1688 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1688 1689
1689 1690 def _getbookmarks(repo, **kwargs):
1690 1691 """Returns bookmark to node mapping.
1691 1692
1692 1693 This function is primarily used to generate `bookmarks` bundle2 part.
1693 1694 It is a separate function in order to make it easy to wrap it
1694 1695 in extensions. Passing `kwargs` to the function makes it easy to
1695 1696 add new parameters in extensions.
1696 1697 """
1697 1698
1698 1699 return dict(bookmod.listbinbookmarks(repo))
1699 1700
1700 1701 def check_heads(repo, their_heads, context):
1701 1702 """check if the heads of a repo have been modified
1702 1703
1703 1704 Used by peer for unbundling.
1704 1705 """
1705 1706 heads = repo.heads()
1706 1707 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1707 1708 if not (their_heads == ['force'] or their_heads == heads or
1708 1709 their_heads == ['hashed', heads_hash]):
1709 1710 # someone else committed/pushed/unbundled while we
1710 1711 # were transferring data
1711 1712 raise error.PushRaced('repository changed while %s - '
1712 1713 'please try again' % context)
1713 1714
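A standalone sketch of the race check above: the client ships a hash of the heads it saw during discovery and the server recomputes it over its current heads, so any commit landing in between changes the digest. Node values are placeholders:

    import hashlib

    def headshash(heads):
        return hashlib.sha1(b''.join(sorted(heads))).digest()

    heads = [b'\x11' * 20, b'\x22' * 20]
    claimed = headshash(heads)
    heads.append(b'\x33' * 20)           # someone pushed in between
    assert claimed != headshash(heads)   # -> PushRaced in check_heads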
1714 1715 def unbundle(repo, cg, heads, source, url):
1715 1716 """Apply a bundle to a repo.
1716 1717
1717 1718 This function makes sure the repo is locked during the application and has
1718 1719 a mechanism to check that no push race occurred between the creation of the
1719 1720 bundle and its application.
1720 1721
1721 1722 If the push was raced, a PushRaced exception is raised."""
1722 1723 r = 0
1723 1724 # need a transaction when processing a bundle2 stream
1724 1725 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1725 1726 lockandtr = [None, None, None]
1726 1727 recordout = None
1727 1728 # quick fix for output mismatch with bundle2 in 3.4
1728 1729 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1729 1730 False)
1730 1731 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1731 1732 captureoutput = True
1732 1733 try:
1733 1734 # note: outside bundle1, 'heads' is expected to be empty and this
1734 1735 # 'check_heads' call will be a no-op
1735 1736 check_heads(repo, heads, 'uploading changes')
1736 1737 # push can proceed
1737 1738 if not isinstance(cg, bundle2.unbundle20):
1738 1739 # legacy case: bundle1 (changegroup 01)
1739 1740 txnname = "\n".join([source, util.hidepassword(url)])
1740 1741 with repo.lock(), repo.transaction(txnname) as tr:
1741 1742 op = bundle2.applybundle(repo, cg, tr, source, url)
1742 1743 r = bundle2.combinechangegroupresults(op)
1743 1744 else:
1744 1745 r = None
1745 1746 try:
1746 1747 def gettransaction():
1747 1748 if not lockandtr[2]:
1748 1749 lockandtr[0] = repo.wlock()
1749 1750 lockandtr[1] = repo.lock()
1750 1751 lockandtr[2] = repo.transaction(source)
1751 1752 lockandtr[2].hookargs['source'] = source
1752 1753 lockandtr[2].hookargs['url'] = url
1753 1754 lockandtr[2].hookargs['bundle2'] = '1'
1754 1755 return lockandtr[2]
1755 1756
1756 1757 # Do greedy locking by default until we're satisfied with lazy
1757 1758 # locking.
1758 1759 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1759 1760 gettransaction()
1760 1761
1761 1762 op = bundle2.bundleoperation(repo, gettransaction,
1762 1763 captureoutput=captureoutput)
1763 1764 try:
1764 1765 op = bundle2.processbundle(repo, cg, op=op)
1765 1766 finally:
1766 1767 r = op.reply
1767 1768 if captureoutput and r is not None:
1768 1769 repo.ui.pushbuffer(error=True, subproc=True)
1769 1770 def recordout(output):
1770 1771 r.newpart('output', data=output, mandatory=False)
1771 1772 if lockandtr[2] is not None:
1772 1773 lockandtr[2].close()
1773 1774 except BaseException as exc:
1774 1775 exc.duringunbundle2 = True
1775 1776 if captureoutput and r is not None:
1776 1777 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1777 1778 def recordout(output):
1778 1779 part = bundle2.bundlepart('output', data=output,
1779 1780 mandatory=False)
1780 1781 parts.append(part)
1781 1782 raise
1782 1783 finally:
1783 1784 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1784 1785 if recordout is not None:
1785 1786 recordout(repo.ui.popbuffer())
1786 1787 return r
1787 1788
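The lockandtr dance above, reduced to its core pattern: a closure mutates a shared list so the finally block can release whatever was actually acquired, in reverse order. The resources here are stand-ins:

    def run(acquire):                    # acquire() -> (wlock, lock, tr)
        state = [None, None, None]
        def gettransaction():
            if not state[2]:
                state[0], state[1], state[2] = acquire()
            return state[2]
        try:
            return gettransaction()      # lazily acquired on first use
        finally:
            for res in reversed(state):
                if res is not None:
                    pass                 # release res here

    assert run(lambda: ('wlock', 'lock', 'tr')) == 'tr'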
1788 1789 def _maybeapplyclonebundle(pullop):
1789 1790 """Apply a clone bundle from a remote, if possible."""
1790 1791
1791 1792 repo = pullop.repo
1792 1793 remote = pullop.remote
1793 1794
1794 1795 if not repo.ui.configbool('ui', 'clonebundles', True):
1795 1796 return
1796 1797
1797 1798 # Only run if local repo is empty.
1798 1799 if len(repo):
1799 1800 return
1800 1801
1801 1802 if pullop.heads:
1802 1803 return
1803 1804
1804 1805 if not remote.capable('clonebundles'):
1805 1806 return
1806 1807
1807 1808 res = remote._call('clonebundles')
1808 1809
1809 1810 # If we call the wire protocol command, that's good enough to record the
1810 1811 # attempt.
1811 1812 pullop.clonebundleattempted = True
1812 1813
1813 1814 entries = parseclonebundlesmanifest(repo, res)
1814 1815 if not entries:
1815 1816 repo.ui.note(_('no clone bundles available on remote; '
1816 1817 'falling back to regular clone\n'))
1817 1818 return
1818 1819
1819 1820 entries = filterclonebundleentries(repo, entries)
1820 1821 if not entries:
1821 1822 # There is a thundering herd concern here. However, if a server
1822 1823 # operator doesn't advertise bundles appropriate for its clients,
1823 1824 # they deserve what's coming. Furthermore, from a client's
1824 1825 # perspective, no automatic fallback would mean not being able to
1825 1826 # clone!
1826 1827 repo.ui.warn(_('no compatible clone bundles available on server; '
1827 1828 'falling back to regular clone\n'))
1828 1829 repo.ui.warn(_('(you may want to report this to the server '
1829 1830 'operator)\n'))
1830 1831 return
1831 1832
1832 1833 entries = sortclonebundleentries(repo.ui, entries)
1833 1834
1834 1835 url = entries[0]['URL']
1835 1836 repo.ui.status(_('applying clone bundle from %s\n') % url)
1836 1837 if trypullbundlefromurl(repo.ui, repo, url):
1837 1838 repo.ui.status(_('finished applying clone bundle\n'))
1838 1839 # Bundle failed.
1839 1840 #
1840 1841 # We abort by default to avoid the thundering herd of
1841 1842 # clients flooding a server that was expecting expensive
1842 1843 # clone load to be offloaded.
1843 1844 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1844 1845 repo.ui.warn(_('falling back to normal clone\n'))
1845 1846 else:
1846 1847 raise error.Abort(_('error applying bundle'),
1847 1848 hint=_('if this error persists, consider contacting '
1848 1849 'the server operator or disable clone '
1849 1850 'bundles via '
1850 1851 '"--config ui.clonebundles=false"'))
1851 1852
1852 1853 def parseclonebundlesmanifest(repo, s):
1853 1854 """Parses the raw text of a clone bundles manifest.
1854 1855
1855 1856 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1856 1857 to the URL and other keys are the attributes for the entry.
1857 1858 """
1858 1859 m = []
1859 1860 for line in s.splitlines():
1860 1861 fields = line.split()
1861 1862 if not fields:
1862 1863 continue
1863 1864 attrs = {'URL': fields[0]}
1864 1865 for rawattr in fields[1:]:
1865 1866 key, value = rawattr.split('=', 1)
1866 1867 key = urlreq.unquote(key)
1867 1868 value = urlreq.unquote(value)
1868 1869 attrs[key] = value
1869 1870
1870 1871 # Parse BUNDLESPEC into components. This makes client-side
1871 1872 # preferences easier to specify since you can prefer a single
1872 1873 # component of the BUNDLESPEC.
1873 1874 if key == 'BUNDLESPEC':
1874 1875 try:
1875 1876 comp, version, params = parsebundlespec(repo, value,
1876 1877 externalnames=True)
1877 1878 attrs['COMPRESSION'] = comp
1878 1879 attrs['VERSION'] = version
1879 1880 except error.InvalidBundleSpecification:
1880 1881 pass
1881 1882 except error.UnsupportedBundleSpecification:
1882 1883 pass
1883 1884
1884 1885 m.append(attrs)
1885 1886
1886 1887 return m
1887 1888
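A worked example of the manifest format parsed above, with the URL-decoding step elided; the manifest content is illustrative:

    sample = ('https://example.com/full.hg '
              'BUNDLESPEC=gzip-v2 REQUIRESNI=true\n')
    entries = []
    for line in sample.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            attrs[key] = value
        entries.append(attrs)
    assert entries[0]['BUNDLESPEC'] == 'gzip-v2'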
1888 1889 def filterclonebundleentries(repo, entries):
1889 1890 """Remove incompatible clone bundle manifest entries.
1890 1891
1891 1892 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1892 1893 and returns a new list consisting of only the entries that this client
1893 1894 should be able to apply.
1894 1895
1895 1896 There is no guarantee we'll be able to apply all returned entries because
1896 1897 the metadata we use to filter on may be missing or wrong.
1897 1898 """
1898 1899 newentries = []
1899 1900 for entry in entries:
1900 1901 spec = entry.get('BUNDLESPEC')
1901 1902 if spec:
1902 1903 try:
1903 1904 parsebundlespec(repo, spec, strict=True)
1904 1905 except error.InvalidBundleSpecification as e:
1905 1906 repo.ui.debug(str(e) + '\n')
1906 1907 continue
1907 1908 except error.UnsupportedBundleSpecification as e:
1908 1909 repo.ui.debug('filtering %s because unsupported bundle '
1909 1910 'spec: %s\n' % (entry['URL'], str(e)))
1910 1911 continue
1911 1912
1912 1913 if 'REQUIRESNI' in entry and not sslutil.hassni:
1913 1914 repo.ui.debug('filtering %s because SNI not supported\n' %
1914 1915 entry['URL'])
1915 1916 continue
1916 1917
1917 1918 newentries.append(entry)
1918 1919
1919 1920 return newentries
1920 1921
1921 1922 class clonebundleentry(object):
1922 1923 """Represents an item in a clone bundles manifest.
1923 1924
1924 1925 This rich class is needed to support sorting since sorted() in Python 3
1925 1926 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
1926 1927 won't work.
1927 1928 """
1928 1929
1929 1930 def __init__(self, value, prefers):
1930 1931 self.value = value
1931 1932 self.prefers = prefers
1932 1933
1933 1934 def _cmp(self, other):
1934 1935 for prefkey, prefvalue in self.prefers:
1935 1936 avalue = self.value.get(prefkey)
1936 1937 bvalue = other.value.get(prefkey)
1937 1938
1938 1939 # Special case: b is missing the attribute and a matches exactly.
1939 1940 if avalue is not None and bvalue is None and avalue == prefvalue:
1940 1941 return -1
1941 1942
1942 1943 # Special case: a is missing the attribute and b matches exactly.
1943 1944 if bvalue is not None and avalue is None and bvalue == prefvalue:
1944 1945 return 1
1945 1946
1946 1947 # We can't compare unless attribute present on both.
1947 1948 if avalue is None or bvalue is None:
1948 1949 continue
1949 1950
1950 1951 # Same values should fall back to next attribute.
1951 1952 if avalue == bvalue:
1952 1953 continue
1953 1954
1954 1955 # Exact matches come first.
1955 1956 if avalue == prefvalue:
1956 1957 return -1
1957 1958 if bvalue == prefvalue:
1958 1959 return 1
1959 1960
1960 1961 # Fall back to next attribute.
1961 1962 continue
1962 1963
1963 1964 # If we got here we couldn't sort by attributes and prefers. Fall
1964 1965 # back to index order.
1965 1966 return 0
1966 1967
1967 1968 def __lt__(self, other):
1968 1969 return self._cmp(other) < 0
1969 1970
1970 1971 def __gt__(self, other):
1971 1972 return self._cmp(other) > 0
1972 1973
1973 1974 def __eq__(self, other):
1974 1975 return self._cmp(other) == 0
1975 1976
1976 1977 def __le__(self, other):
1977 1978 return self._cmp(other) <= 0
1978 1979
1979 1980 def __ge__(self, other):
1980 1981 return self._cmp(other) >= 0
1981 1982
1982 1983 def __ne__(self, other):
1983 1984 return self._cmp(other) != 0
1984 1985
1985 1986 def sortclonebundleentries(ui, entries):
1986 1987 prefers = ui.configlist('ui', 'clonebundleprefers')
1987 1988 if not prefers:
1988 1989 return list(entries)
1989 1990
1990 1991 prefers = [p.split('=', 1) for p in prefers]
1991 1992
1992 1993 items = sorted(clonebundleentry(v, prefers) for v in entries)
1993 1994 return [i.value for i in items]
1994 1995
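How preferences steer that sort, as a simplified standalone sketch: a key-based rank that ignores the missing-attribute special cases clonebundleentry handles (which is why the real code cannot use key=). Entries and preferences are illustrative:

    entries = [{'URL': 'a', 'COMPRESSION': 'gzip'},
               {'URL': 'b', 'COMPRESSION': 'zstd'}]
    prefers = [('COMPRESSION', 'zstd')]   # from ui.clonebundleprefers

    def rank(entry):
        # a matching preferred value sorts first; sorted() is stable,
        # so ties keep their manifest order
        return [0 if entry.get(k) == v else 1 for k, v in prefers]

    assert [e['URL'] for e in sorted(entries, key=rank)] == ['b', 'a']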
1995 1996 def trypullbundlefromurl(ui, repo, url):
1996 1997 """Attempt to apply a bundle from a URL."""
1997 1998 with repo.lock(), repo.transaction('bundleurl') as tr:
1998 1999 try:
1999 2000 fh = urlmod.open(ui, url)
2000 2001 cg = readbundle(ui, fh, 'stream')
2001 2002
2002 2003 if isinstance(cg, streamclone.streamcloneapplier):
2003 2004 cg.apply(repo)
2004 2005 else:
2005 2006 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2006 2007 return True
2007 2008 except urlerr.httperror as e:
2008 2009 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2009 2010 except urlerr.urlerror as e:
2010 2011 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2011 2012
2012 2013 return False
@@ -1,299 +1,318 b''
1 1 ==================================
2 2 Basic testing for the push command
3 3 ==================================
4 4
5 5 Testing of the '--rev' flag
6 6 ===========================
7 7
8 8 $ hg init test-revflag
9 9 $ hg -R test-revflag unbundle "$TESTDIR/bundles/remote.hg"
10 10 adding changesets
11 11 adding manifests
12 12 adding file changes
13 13 added 9 changesets with 7 changes to 4 files (+1 heads)
14 14 (run 'hg heads' to see heads, 'hg merge' to merge)
15 15
16 16 $ for i in 0 1 2 3 4 5 6 7 8; do
17 17 > echo
18 18 > hg init test-revflag-"$i"
19 19 > hg -R test-revflag push -r "$i" test-revflag-"$i"
20 20 > hg -R test-revflag-"$i" verify
21 21 > done
22 22
23 23 pushing to test-revflag-0
24 24 searching for changes
25 25 adding changesets
26 26 adding manifests
27 27 adding file changes
28 28 added 1 changesets with 1 changes to 1 files
29 29 checking changesets
30 30 checking manifests
31 31 crosschecking files in changesets and manifests
32 32 checking files
33 33 1 files, 1 changesets, 1 total revisions
34 34
35 35 pushing to test-revflag-1
36 36 searching for changes
37 37 adding changesets
38 38 adding manifests
39 39 adding file changes
40 40 added 2 changesets with 2 changes to 1 files
41 41 checking changesets
42 42 checking manifests
43 43 crosschecking files in changesets and manifests
44 44 checking files
45 45 1 files, 2 changesets, 2 total revisions
46 46
47 47 pushing to test-revflag-2
48 48 searching for changes
49 49 adding changesets
50 50 adding manifests
51 51 adding file changes
52 52 added 3 changesets with 3 changes to 1 files
53 53 checking changesets
54 54 checking manifests
55 55 crosschecking files in changesets and manifests
56 56 checking files
57 57 1 files, 3 changesets, 3 total revisions
58 58
59 59 pushing to test-revflag-3
60 60 searching for changes
61 61 adding changesets
62 62 adding manifests
63 63 adding file changes
64 64 added 4 changesets with 4 changes to 1 files
65 65 checking changesets
66 66 checking manifests
67 67 crosschecking files in changesets and manifests
68 68 checking files
69 69 1 files, 4 changesets, 4 total revisions
70 70
71 71 pushing to test-revflag-4
72 72 searching for changes
73 73 adding changesets
74 74 adding manifests
75 75 adding file changes
76 76 added 2 changesets with 2 changes to 1 files
77 77 checking changesets
78 78 checking manifests
79 79 crosschecking files in changesets and manifests
80 80 checking files
81 81 1 files, 2 changesets, 2 total revisions
82 82
83 83 pushing to test-revflag-5
84 84 searching for changes
85 85 adding changesets
86 86 adding manifests
87 87 adding file changes
88 88 added 3 changesets with 3 changes to 1 files
89 89 checking changesets
90 90 checking manifests
91 91 crosschecking files in changesets and manifests
92 92 checking files
93 93 1 files, 3 changesets, 3 total revisions
94 94
95 95 pushing to test-revflag-6
96 96 searching for changes
97 97 adding changesets
98 98 adding manifests
99 99 adding file changes
100 100 added 4 changesets with 5 changes to 2 files
101 101 checking changesets
102 102 checking manifests
103 103 crosschecking files in changesets and manifests
104 104 checking files
105 105 2 files, 4 changesets, 5 total revisions
106 106
107 107 pushing to test-revflag-7
108 108 searching for changes
109 109 adding changesets
110 110 adding manifests
111 111 adding file changes
112 112 added 5 changesets with 6 changes to 3 files
113 113 checking changesets
114 114 checking manifests
115 115 crosschecking files in changesets and manifests
116 116 checking files
117 117 3 files, 5 changesets, 6 total revisions
118 118
119 119 pushing to test-revflag-8
120 120 searching for changes
121 121 adding changesets
122 122 adding manifests
123 123 adding file changes
124 124 added 5 changesets with 5 changes to 2 files
125 125 checking changesets
126 126 checking manifests
127 127 crosschecking files in changesets and manifests
128 128 checking files
129 129 2 files, 5 changesets, 5 total revisions
130 130
131 131 $ cd test-revflag-8
132 132
133 133 $ hg pull ../test-revflag-7
134 134 pulling from ../test-revflag-7
135 135 searching for changes
136 136 adding changesets
137 137 adding manifests
138 138 adding file changes
139 139 added 4 changesets with 2 changes to 3 files (+1 heads)
140 140 (run 'hg heads' to see heads, 'hg merge' to merge)
141 141
142 142 $ hg verify
143 143 checking changesets
144 144 checking manifests
145 145 crosschecking files in changesets and manifests
146 146 checking files
147 147 4 files, 9 changesets, 7 total revisions
148 148
149 149 $ cd ..
150 150
151 151 Test server side validation during push
152 152 =======================================
153 153
154 154 $ hg init test-validation
155 155 $ cd test-validation
156 156
157 157 $ cat > .hg/hgrc <<EOF
158 158 > [server]
159 159 > validate=1
160 160 > EOF
161 161
162 162 $ echo alpha > alpha
163 163 $ echo beta > beta
164 164 $ hg addr
165 165 adding alpha
166 166 adding beta
167 167 $ hg ci -m 1
168 168
169 169 $ cd ..
170 170 $ hg clone test-validation test-validation-clone
171 171 updating to branch default
172 172 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
173 173
174 174 Test spurious filelog entries:
175 175
176 176 $ cd test-validation-clone
177 177 $ echo blah >> beta
178 178 $ cp .hg/store/data/beta.i tmp1
179 179 $ hg ci -m 2
180 180 $ cp .hg/store/data/beta.i tmp2
181 181 $ hg -q rollback
182 182 $ mv tmp2 .hg/store/data/beta.i
183 183 $ echo blah >> beta
184 184 $ hg ci -m '2 (corrupt)'
185 185
186 186 Expected to fail:
187 187
188 188 $ hg verify
189 189 checking changesets
190 190 checking manifests
191 191 crosschecking files in changesets and manifests
192 192 checking files
193 193 beta@1: dddc47b3ba30 not in manifests
194 194 2 files, 2 changesets, 4 total revisions
195 195 1 integrity errors encountered!
196 196 (first damaged changeset appears to be 1)
197 197 [1]
198 198
199 199 $ hg push
200 200 pushing to $TESTTMP/test-validation (glob)
201 201 searching for changes
202 202 adding changesets
203 203 adding manifests
204 204 adding file changes
205 205 transaction abort!
206 206 rollback completed
207 207 abort: received spurious file revlog entry
208 208 [255]
209 209
210 210 $ hg -q rollback
211 211 $ mv tmp1 .hg/store/data/beta.i
212 212 $ echo beta > beta
213 213
214 214 Test missing filelog entries:
215 215
216 216 $ cp .hg/store/data/beta.i tmp
217 217 $ echo blah >> beta
218 218 $ hg ci -m '2 (corrupt)'
219 219 $ mv tmp .hg/store/data/beta.i
220 220
221 221 Expected to fail:
222 222
223 223 $ hg verify
224 224 checking changesets
225 225 checking manifests
226 226 crosschecking files in changesets and manifests
227 227 checking files
228 228 beta@1: manifest refers to unknown revision dddc47b3ba30
229 229 2 files, 2 changesets, 2 total revisions
230 230 1 integrity errors encountered!
231 231 (first damaged changeset appears to be 1)
232 232 [1]
233 233
234 234 $ hg push
235 235 pushing to $TESTTMP/test-validation (glob)
236 236 searching for changes
237 237 adding changesets
238 238 adding manifests
239 239 adding file changes
240 240 transaction abort!
241 241 rollback completed
242 242 abort: missing file data for beta:dddc47b3ba30e54484720ce0f4f768a0f4b6efb9 - run hg verify
243 243 [255]
244 244
245 245 $ cd ..
246 246
247 247 Test push hook locking
248 248 ======================
249 249
250 250 $ hg init 1
251 251
252 252 $ echo '[ui]' >> 1/.hg/hgrc
253 253 $ echo 'timeout = 10' >> 1/.hg/hgrc
254 254
255 255 $ echo foo > 1/foo
256 256 $ hg --cwd 1 ci -A -m foo
257 257 adding foo
258 258
259 259 $ hg clone 1 2
260 260 updating to branch default
261 261 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
262 262
263 263 $ hg clone 2 3
264 264 updating to branch default
265 265 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
266 266
267 267 $ cat <<EOF > $TESTTMP/debuglocks-pretxn-hook.sh
268 268 > hg debuglocks
269 269 > true
270 270 > EOF
271 271 $ echo '[hooks]' >> 2/.hg/hgrc
272 272 $ echo "pretxnchangegroup.a = sh $TESTTMP/debuglocks-pretxn-hook.sh" >> 2/.hg/hgrc
273 273 $ echo 'changegroup.push = hg push -qf ../1' >> 2/.hg/hgrc
274 274
275 275 $ echo bar >> 3/foo
276 276 $ hg --cwd 3 ci -m bar
277 277
278 278 $ hg --cwd 3 push ../2 --config devel.legacy.exchange=bundle1
279 279 pushing to ../2
280 280 searching for changes
281 281 adding changesets
282 282 adding manifests
283 283 adding file changes
284 284 added 1 changesets with 1 changes to 1 files
285 285 lock: user *, process * (*s) (glob)
286 286 wlock: free
287 287
288 288 $ hg --cwd 1 --config extensions.strip= strip tip -q
289 289 $ hg --cwd 2 --config extensions.strip= strip tip -q
290 290 $ hg --cwd 3 push ../2 # bundle2+
291 291 pushing to ../2
292 292 searching for changes
293 293 adding changesets
294 294 adding manifests
295 295 adding file changes
296 296 added 1 changesets with 1 changes to 1 files
297 297 lock: user *, process * (*s) (glob)
298 298 wlock: user *, process * (*s) (glob)
299 299
300 Test bare push with multiple race checking options
301 --------------------------------------------------
302
303 $ hg init test-bare-push-no-concurrency
304 $ hg init test-bare-push-unrelated-concurrency
305 $ hg -R test-revflag push -r 0 test-bare-push-no-concurrency --config server.concurrent-push-mode=strict
306 pushing to test-bare-push-no-concurrency
307 searching for changes
308 adding changesets
309 adding manifests
310 adding file changes
311 added 1 changesets with 1 changes to 1 files
312 $ hg -R test-revflag push -r 0 test-bare-push-unrelated-concurrency --config server.concurrent-push-mode=check-related
313 pushing to test-bare-push-unrelated-concurrency
314 searching for changes
315 adding changesets
316 adding manifests
317 adding file changes
318 added 1 changesets with 1 changes to 1 files