##// END OF EJS Templates
push: continue without locking on lock failure other than EEXIST (issue5882)...
Yuya Nishihara -
r38111:7c05198c stable
parent child Browse files
Show More
@@ -1,2421 +1,2418 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 import errno
12 11 import hashlib
13 12
14 13 from .i18n import _
15 14 from .node import (
16 15 bin,
17 16 hex,
18 17 nullid,
19 18 )
20 19 from .thirdparty import (
21 20 attr,
22 21 )
23 22 from . import (
24 23 bookmarks as bookmod,
25 24 bundle2,
26 25 changegroup,
27 26 discovery,
28 27 error,
29 28 lock as lockmod,
30 29 logexchange,
31 30 obsolete,
32 31 phases,
33 32 pushkey,
34 33 pycompat,
35 34 scmutil,
36 35 sslutil,
37 36 streamclone,
38 37 url as urlmod,
39 38 util,
40 39 )
41 40 from .utils import (
42 41 stringutil,
43 42 )
44 43
45 44 urlerr = util.urlerr
46 45 urlreq = util.urlreq
47 46
# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Maps bundle version with content opts to choose which part to bundle
_bundlespeccontentopts = {
    'v1': {
        'changegroup': True,
        'cg.version': '01',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': False,
        'revbranchcache': False
    },
    'v2': {
        'changegroup': True,
        'cg.version': '02',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': True,
        'revbranchcache': True
    },
    'packed1' : {
        'cg.version': 's1'
    }
}
# 'bundle2' is a legacy alias: it shares v2's content options.
_bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']

# Option overrides applied on top of the per-version content options when a
# spec variant (e.g. ";stream=v2") is requested; see parsebundlespec.
_bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
                                    "tagsfnodescache": False,
                                    "revbranchcache": False}}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
@attr.s
class bundlespec(object):
    # Parsed form of a bundle specification string (see parsebundlespec).
    # ``compression``/``version`` hold the local engine/changegroup names;
    # ``wirecompression``/``wireversion`` hold the wire-protocol spellings
    # (from engine.bundletype() and _bundlespeccgversions respectively).
    compression = attr.ib()
    wirecompression = attr.ib()
    version = attr.ib()
    wireversion = attr.ib()
    # dict of URI-decoded key=value parameters from the spec string
    params = attr.ib()
    # content options dict derived from _bundlespeccontentopts (+ variants)
    contentopts = attr.ib()
94 93
def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>;k0=v0;k1=v1" into the bare version string and a
        # dict of URI-decoded parameters.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Fully-qualified spec: "<compression>-<version>[;params]".
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                    _('missing support for repository features: %s') %
                      ', '.join(sorted(missingreqs)))

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if "stream" in params and params["stream"] == "v2":
        variant = _bundlespecvariants["streamv2"]
        contentopts.update(variant)

    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(compression, wirecompression, version, wireversion,
                      params, contentopts)
223 222
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte header of ``fh`` and return a matching unbundler.

    ``fname`` is only used in error messages; an empty value is reported
    as "stream".  When ``vfs`` is given, ``fname`` is joined to it for
    display.  Raises ``error.Abort`` on non-HG data or unknown versions.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # A raw, headerless changegroup stream begins with a NUL byte;
        # wrap it so it reads back as an uncompressed HG10 bundle.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # bundle1: the compression marker follows the magic unless we
        # already know it (headerless stream case above).
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == 'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
251 250
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        # Map a bundle compression marker to its bundlespec name, or None
        # when the engine is unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            # report a truncated bzip2 stream as plain bzip2
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        # Walk the parts to find out which changegroup version is inside.
        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))
            elif part.type == 'stream2' and version is None:
                # A stream2 part requires to be part of a v2 bundle
                version = "v2"
                requirements = urlreq.unquote(part.params['requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return 'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return 'none-packed1;%s' % formatted
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
311 310
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    # Keep only the common nodes the local changelog actually knows about;
    # with no common set at all, fall back to the null revision.
    if common:
        common = [node for node in common if cl.hasnode(node)]
    else:
        common = [nullid]
    # An empty/absent heads set means "everything": use all local heads.
    return discovery.outgoing(repo, common, heads or cl.heads())
330 329
def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    # The goal is this config is to allow developer to choose the bundle
    # version used during exchanged. This is especially handy during test.
    # Value is a list of bundle version to be picked from, highest version
    # should be used.
    #
    # developer config: devel.legacy.exchange
    legacy = op.repo.ui.configlist('devel', 'legacy.exchange')
    if 'bundle1' in legacy and 'bundle2' not in legacy:
        return True
    # Otherwise bundle1 is only forced when the remote lacks bundle2.
    return not op.remote.capable('bundle2')
345 344
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        # cached after first access (util.propertycache)
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
472 471
473 472
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except error.LockUnavailable as err:
        # Catching the lock module's own exception covers every lock failure
        # other than EEXIST (issue5882); the previous IOError/EACCES check
        # only tolerated permission errors and aborted on anything else.
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop
538 535
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator registering a pre-push discovery step

    Registers the decorated function under ``stepname`` in the step ->
    function mapping and appends the name to the ordered step list, so
    decoration order determines execution order (this may matter).

    Only use this for brand-new steps; to wrap an existing step from an
    extension, modify the pushdiscovery dictionary directly."""
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
562 559
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
568 565
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    repo, remote, force = pushop.repo, pushop.remote, pushop.force
    kwargs = {}
    if pushop.revs:
        # limit discovery to ancestors of the requested revisions
        kwargs['ancestorsof'] = pushop.revs
    commoninc = discovery.findcommonincoming(repo, remote, force=force,
                                             **kwargs)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(repo, remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
585 582
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, 'phases')

    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    # On a non-publishing remote, only public changesets need a phase push.
    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
638 635
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the push"""
    repo = pushop.repo
    # Nothing to do unless marker exchange is enabled, we actually have
    # markers, and the remote advertises the 'obsolete' namespace (the
    # listkeys call is only reached once the cheap local checks pass).
    if (not obsolete.isenabled(repo, obsolete.exchangeopt)
            or not repo.obsstore
            or 'obsolete' not in listkeys(pushop.remote, 'namespaces')):
        return

    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
655 652
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover which bookmark updates should accompany the push"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark handling to ancestors of the pushed revisions
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = listkeys(remote, 'bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        # a side may have no node for a bookmark; pass None through unchanged
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        return [(b, safehex(scid), safehex(dcid))
                for (b, scid, dcid) in bookmarks]

    comp = [hexifycompbookmarks(marks) for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
686 683
def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """take decision on bookmark to pull from the remote bookmark

    Exist to help extensions who want to alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    def claim(b):
        # mark an explicitly-requested bookmark as handled
        if b in explicit:
            explicit.remove(b)

    # bookmarks that advanced locally: push them when their new position
    # was actually included in the push (or the push was unrestricted)
    for b, scid, dcid in advsrc:
        claim(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # bookmarks that only exist locally: export them
    for b, scid, dcid in addsrc:
        claim(b)
        pushop.outbookmarks.append((b, '', scid))
    # bookmarks the local side overwrites on the remote
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        claim(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # bookmarks that only exist remotely: treat as "deleted locally"
    for b, scid, dcid in adddst:
        claim(b)
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        claim(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(_('bookmark %s does not exist on the local '
                         'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
730 727
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets before sending them

    Returns False (after reporting "no changes found") when there is
    nothing to push.  Unless ``pushop.force`` is set, aborts when any
    outgoing head is obsolete or unstable, then runs the standard
    new-head checks via ``discovery.checkheads``.  Returns True when
    the push may proceed.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
765 762
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator

    The decorated function is stored under ``stepname`` in the step ->
    function mapping and placed in the ordered step list — appended by
    default, or inserted at position ``idx`` when given (decoration
    order may matter).

    Only use this for brand-new steps; to wrap an existing step from an
    extension, modify the b2partsgenmapping dictionary directly."""
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        # insert at the requested slot, defaulting to the end of the list
        position = len(b2partsgenorder) if idx is None else idx
        b2partsgenorder.insert(position, stepname)
        return func
    return register
792 789
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            # coarse check: the remote's whole head set must be unchanged
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            # finer check: only heads we replace or discard ("affected")
            # need to still be present on the remote
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)
816 813
def _pushing(pushop):
    """return True if we are pushing anything"""
    # true as soon as any category of outgoing data is non-empty
    return any((pushop.outgoing.missing,
                pushop.outdatedphases,
                pushop.outobsmarkers,
                pushop.outbookmarks))
823 820
@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'bookmarks' not in b2caps or not pushop.outbookmarks:
        return
    # record the expected current (old) position of every pushed bookmark
    data = [(book, bin(old)) for book, old, new in pushop.outbookmarks]
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('check:bookmarks', data=checkdata)
839 836
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        # one bucket per phase; only public heads and draft roots are filled
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)
857 854
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    # emit the heads-checking part(s) before the changegroup itself
    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    # '01' is the fallback changegroup version when the remote does not
    # advertise anything usable
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions: # 3.1 and 3.2 ship with an empty value
        # negotiate the highest changegroup version supported on both sides
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
897 894
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """dispatch phase push to the binary part or the pushkey fallback"""
    if 'phases' in pushop.stepsdone:
        return
    caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    uselegacy = 'phases' in ui.configlist('devel', 'legacy.exchange')
    if 'heads' in caps.get('phases', ()) and not uselegacy:
        # modern exchange: one binary 'phase-heads' part
        return _pushb2phaseheads(pushop, bundler)
    if 'pushkey' in caps:
        # legacy exchange: one pushkey part per outdated head
        return _pushb2phasespushkey(pushop, bundler)
914 911
915 912 def _pushb2phaseheads(pushop, bundler):
916 913 """push phase information through a bundle2 - binary part"""
917 914 pushop.stepsdone.add('phases')
918 915 if pushop.outdatedphases:
919 916 updates = [[] for p in phases.allphases]
920 917 updates[0].extend(h.node() for h in pushop.outdatedphases)
921 918 phasedata = phases.binaryencode(updates)
922 919 bundler.newpart('phase-heads', data=phasedata)
923 920
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    # (part id, node) pairs so replies/failures can be mapped back to the
    # head they concern
    part2node = []

    def handlefailure(pushop, exc):
        # invoked through pushop.pkfailcb when the server rejects a part
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    # emit one pushkey part per head that must turn public remotely
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # examine the server reply for every part and warn about any head
        # that was not switched to public
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
958 955
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """attach obsolescence markers when the remote understands them"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no obsmarker format shared with the remote; skip silently
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        bundle2.buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
970 967
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    caps = bundle2.bundle2caps(pushop.remote)

    uselegacy = 'bookmarks' in pushop.repo.ui.configlist('devel',
                                                         'legacy.exchange')
    if 'bookmarks' in caps and not uselegacy:
        # single binary part carrying every bookmark move
        return _pushb2bookmarkspart(pushop, bundler)
    if 'pushkey' in caps:
        # legacy fallback: one pushkey part per bookmark
        return _pushb2bookmarkspushkey(pushop, bundler)
985 982
986 983 def _bmaction(old, new):
987 984 """small utility for bookmark pushing"""
988 985 if not old:
989 986 return 'export'
990 987 elif not new:
991 988 return 'delete'
992 989 return 'update'
993 990
994 991 def _pushb2bookmarkspart(pushop, bundler):
995 992 pushop.stepsdone.add('bookmarks')
996 993 if not pushop.outbookmarks:
997 994 return
998 995
999 996 allactions = []
1000 997 data = []
1001 998 for book, old, new in pushop.outbookmarks:
1002 999 new = bin(new)
1003 1000 data.append((book, new))
1004 1001 allactions.append((book, _bmaction(old, new)))
1005 1002 checkdata = bookmod.binaryencode(data)
1006 1003 bundler.newpart('bookmarks', data=checkdata)
1007 1004
1008 1005 def handlereply(op):
1009 1006 ui = pushop.ui
1010 1007 # if success
1011 1008 for book, action in allactions:
1012 1009 ui.status(bookmsgmap[action][0] % book)
1013 1010
1014 1011 return handlereply
1015 1012
def _pushb2bookmarkspushkey(pushop, bundler):
    """push bookmark moves through bundle2, one pushkey part per bookmark"""
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples for reply/failure handling
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # invoked through pushop.pkfailcb when the server rejects a part;
        # abort with the message matching the failed bookmark action
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty 'old' means creation, empty 'new' means deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # report the outcome of each bookmark pushkey part to the user
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
1060 1057
@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if not pushvars:
        return

    # parse KEY=VALUE pairs; a missing '=' is a user error
    shellvars = {}
    for raw in pushvars:
        if '=' not in raw:
            msg = ("unable to parse variable '%s', should follow "
                   "'KEY=VALUE' or 'KEY=' format")
            raise error.Abort(msg % raw)
        k, v = raw.split('=', 1)
        shellvars[k] = v

    part = bundler.newpart('pushvars')
    for key, value in shellvars.iteritems():
        part.addparam(key, value, mandatory=False)
1079 1076
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # let every registered part generator contribute its part(s); a
    # generator may return a callable that will process the server reply
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            # send the bundle and collect the remote's reply bundle
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            # the remote aborted while processing one of our parts
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        # dispatch to the failure callback registered by the part generator,
        # if any; otherwise let the error propagate
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1133 1130
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    # (bundle1 path: only changegroup version '01' is used here)
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())
1173 1170
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # publishing server: everything common is public locally too
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1233 1230
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # Inform the user that phases should have been moved when applicable.
    moved = [n for n in nodes if phase < pushop.repo[n].phase()]
    if moved:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n')
                         % phases.phasenames[phase])
1250 1247
1251 1248 def _pushobsolete(pushop):
1252 1249 """utility function to push obsolete markers to a remote"""
1253 1250 if 'obsmarkers' in pushop.stepsdone:
1254 1251 return
1255 1252 repo = pushop.repo
1256 1253 remote = pushop.remote
1257 1254 pushop.stepsdone.add('obsmarkers')
1258 1255 if pushop.outobsmarkers:
1259 1256 pushop.ui.debug('try to push obsolete markers to remote\n')
1260 1257 rslts = []
1261 1258 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1262 1259 for key in sorted(remotedata, reverse=True):
1263 1260 # reverse sort to ensure we end with dump0
1264 1261 data = remotedata[key]
1265 1262 rslts.append(remote.pushkey('obsolete', key, '', data))
1266 1263 if [r for r in rslts if not r]:
1267 1264 msg = _('failed to push some obsolete markers!\n')
1268 1265 repo.ui.warn(msg)
1269 1266
1270 1267 def _pushbookmark(pushop):
1271 1268 """Update bookmark position on remote"""
1272 1269 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1273 1270 return
1274 1271 pushop.stepsdone.add('bookmarks')
1275 1272 ui = pushop.ui
1276 1273 remote = pushop.remote
1277 1274
1278 1275 for b, old, new in pushop.outbookmarks:
1279 1276 action = 'update'
1280 1277 if not old:
1281 1278 action = 'export'
1282 1279 elif not new:
1283 1280 action = 'delete'
1284 1281
1285 1282 with remote.commandexecutor() as e:
1286 1283 r = e.callcommand('pushkey', {
1287 1284 'namespace': 'bookmarks',
1288 1285 'key': b,
1289 1286 'old': old,
1290 1287 'new': new,
1291 1288 }).result()
1292 1289
1293 1290 if r:
1294 1291 ui.status(bookmsgmap[action][0] % b)
1295 1292 else:
1296 1293 ui.warn(bookmsgmap[action][1] % b)
1297 1294 # discovery can have set the value form invalid entry
1298 1295 if pushop.bkresult is not None:
1299 1296 pushop.bkresult = 1
1300 1297
class pulloperation(object):
    """State container for a single pull operation.

    Carries every piece of pull-related state plus a few very common
    helpers.  A fresh instance should be created at the beginning of each
    pull and discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # local repository we pull into
        self.repo = repo
        # peer we pull from
        self.remote = remote
        # revisions requested (None means "everything")
        self.heads = heads
        # bookmarks explicitly requested, expanded to canonical names
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # is this a forced pull?
        self.force = force
        # was a streaming clone explicitly requested? (True/False/None)
        self.streamclonerequested = streamclonerequested
        # transaction manager, created lazily by callers
        self.trmanager = None
        # changesets common to both sides before the pull
        self.common = None
        # heads actually pulled
        self.rheads = None
        # missing changesets that must be fetched remotely
        self.fetch = None
        # bookmark state on the remote side
        self.remotebookmarks = remotebookmarks
        # changegroup result, also used as return code by pull
        self.cgresult = None
        # names of steps already performed
        self.stepsdone = set()
        # whether a clone from pre-generated bundles was attempted
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        if self.heads is None:
            # full pull: synchronise on everything common plus any remote
            # head not already in the common set
            known = set(self.common)
            subset = list(self.common)
            subset.extend(n for n in self.rheads if n not in known)
            return subset
        # partial pull: synchronise only on the requested subset
        return self.heads

    @util.propertycache
    def canusebundle2(self):
        # bundle2 is usable unless something forces bundle1
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1371 1368
class transactionmanager(util.transactional):
    """Lazily create a transaction and run the right hooks when it ends.

    The underlying transaction is only opened on the first call to
    ``transaction()``; ``close`` and ``release`` are no-ops until then."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1401 1398
def listkeys(remote, namespace):
    """Return the pushkey listing for *namespace* from the *remote* peer."""
    with remote.commandexecutor() as e:
        fut = e.callcommand('listkeys', {'namespace': namespace})
        return fut.result()
1405 1402
def _fullpullbundle2(repo, pullop):
    """run _pullbundle2() repeatedly until the pull is complete"""
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if changegroup.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # nothing was added by this round trip: we are done
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # every remote head is now known locally: we are done
            break
        # partial reply: grow the common set with the freshly pulled heads
        # and ask again for what remains
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1443 1440
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           **pycompat.strkwargs(opargs))

    peerlocal = pullop.remote.local()
    if peerlocal:
        # refuse pulling from a local repository whose requirements we do
        # not understand
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    # hold both locks and scope a single transaction over the whole pull
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _fullpullbundle2(repo, pullop)
        # fallback steps; _pullbundle2 records finished work in
        # pullop.stepsdone, so these are expected to skip anything already
        # transferred through bundle2
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop
1500 1497
# Ordered list of discovery step names to perform before pull; steps are
# registered through the pulldiscovery() decorator and run in this order.
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1508 1505
def pulldiscovery(stepname):
    """decorator registering a discovery step to run before pull

    The decorated function is recorded in ``pulldiscoverymapping`` and its
    name appended to ``pulldiscoveryorder``, so steps run in definition
    order.  Only use this for brand new steps; to wrap an existing step
    from an extension, mutate ``pulldiscoverymapping`` directly."""
    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1524 1521
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order"""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1530 1527
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """pre-fetch bookmark data when bundle2 is not in use

    Without bundle2, bookmarks must be fetched before changeset discovery
    to reduce the chance and the impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # already known (e.g. supplied by the caller); nothing to do
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    raw = listkeys(pullop.remote, 'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(raw)
1545 1542
1546 1543
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote heads is filtered locally, put in back in common.
        #
        # This is a hackish solution to catch most of "common but locally
        # hidden situation". We do not performs discovery on unfiltered
        # repository because it end up doing a pathological amount of round
        # trip for w huge amount of changeset we do not care about.
        #
        # If a set of such "common but filtered" changeset exist on the server
        # but are not including a remote heads, we'll not be able to detect it,
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            # every remote head is already common: nothing left to fetch
            fetch = []
    # publish the discovery results on the pull operation
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1579 1576
1580 1577 def _pullbundle2(pullop):
1581 1578 """pull data using bundle2
1582 1579
1583 1580 For now, the only supported data are changegroup."""
1584 1581 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1585 1582
1586 1583 # make ui easier to access
1587 1584 ui = pullop.repo.ui
1588 1585
1589 1586 # At the moment we don't do stream clones over bundle2. If that is
1590 1587 # implemented then here's where the check for that will go.
1591 1588 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1592 1589
1593 1590 # declare pull perimeters
1594 1591 kwargs['common'] = pullop.common
1595 1592 kwargs['heads'] = pullop.heads or pullop.rheads
1596 1593
1597 1594 if streaming:
1598 1595 kwargs['cg'] = False
1599 1596 kwargs['stream'] = True
1600 1597 pullop.stepsdone.add('changegroup')
1601 1598 pullop.stepsdone.add('phases')
1602 1599
1603 1600 else:
1604 1601 # pulling changegroup
1605 1602 pullop.stepsdone.add('changegroup')
1606 1603
1607 1604 kwargs['cg'] = pullop.fetch
1608 1605
1609 1606 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1610 1607 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1611 1608 if (not legacyphase and hasbinaryphase):
1612 1609 kwargs['phases'] = True
1613 1610 pullop.stepsdone.add('phases')
1614 1611
1615 1612 if 'listkeys' in pullop.remotebundle2caps:
1616 1613 if 'phases' not in pullop.stepsdone:
1617 1614 kwargs['listkeys'] = ['phases']
1618 1615
1619 1616 bookmarksrequested = False
1620 1617 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1621 1618 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1622 1619
1623 1620 if pullop.remotebookmarks is not None:
1624 1621 pullop.stepsdone.add('request-bookmarks')
1625 1622
1626 1623 if ('request-bookmarks' not in pullop.stepsdone
1627 1624 and pullop.remotebookmarks is None
1628 1625 and not legacybookmark and hasbinarybook):
1629 1626 kwargs['bookmarks'] = True
1630 1627 bookmarksrequested = True
1631 1628
1632 1629 if 'listkeys' in pullop.remotebundle2caps:
1633 1630 if 'request-bookmarks' not in pullop.stepsdone:
1634 1631 # make sure to always includes bookmark data when migrating
1635 1632 # `hg incoming --bundle` to using this function.
1636 1633 pullop.stepsdone.add('request-bookmarks')
1637 1634 kwargs.setdefault('listkeys', []).append('bookmarks')
1638 1635
1639 1636 # If this is a full pull / clone and the server supports the clone bundles
1640 1637 # feature, tell the server whether we attempted a clone bundle. The
1641 1638 # presence of this flag indicates the client supports clone bundles. This
1642 1639 # will enable the server to treat clients that support clone bundles
1643 1640 # differently from those that don't.
1644 1641 if (pullop.remote.capable('clonebundles')
1645 1642 and pullop.heads is None and list(pullop.common) == [nullid]):
1646 1643 kwargs['cbattempted'] = pullop.clonebundleattempted
1647 1644
1648 1645 if streaming:
1649 1646 pullop.repo.ui.status(_('streaming all changes\n'))
1650 1647 elif not pullop.fetch:
1651 1648 pullop.repo.ui.status(_("no changes found\n"))
1652 1649 pullop.cgresult = 0
1653 1650 else:
1654 1651 if pullop.heads is None and list(pullop.common) == [nullid]:
1655 1652 pullop.repo.ui.status(_("requesting all changes\n"))
1656 1653 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1657 1654 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1658 1655 if obsolete.commonversion(remoteversions) is not None:
1659 1656 kwargs['obsmarkers'] = True
1660 1657 pullop.stepsdone.add('obsmarkers')
1661 1658 _pullbundle2extraprepare(pullop, kwargs)
1662 1659
1663 1660 with pullop.remote.commandexecutor() as e:
1664 1661 args = dict(kwargs)
1665 1662 args['source'] = 'pull'
1666 1663 bundle = e.callcommand('getbundle', args).result()
1667 1664
1668 1665 try:
1669 1666 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
1670 1667 source='pull')
1671 1668 op.modes['bookmarks'] = 'records'
1672 1669 bundle2.processbundle(pullop.repo, bundle, op=op)
1673 1670 except bundle2.AbortFromPart as exc:
1674 1671 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1675 1672 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1676 1673 except error.BundleValueError as exc:
1677 1674 raise error.Abort(_('missing support for %s') % exc)
1678 1675
1679 1676 if pullop.fetch:
1680 1677 pullop.cgresult = bundle2.combinechangegroupresults(op)
1681 1678
1682 1679 # processing phases change
1683 1680 for namespace, value in op.records['listkeys']:
1684 1681 if namespace == 'phases':
1685 1682 _pullapplyphases(pullop, value)
1686 1683
1687 1684 # processing bookmark update
1688 1685 if bookmarksrequested:
1689 1686 books = {}
1690 1687 for record in op.records['bookmarks']:
1691 1688 books[record['bookmark']] = record["node"]
1692 1689 pullop.remotebookmarks = books
1693 1690 else:
1694 1691 for namespace, value in op.records['listkeys']:
1695 1692 if namespace == 'bookmarks':
1696 1693 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1697 1694
1698 1695 # bookmark data were either already there or pulled in the bundle
1699 1696 if pullop.remotebookmarks is not None:
1700 1697 _pullbookmarks(pullop)
1701 1698
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call

    ``kwargs`` is the argument dict that will be sent to the remote
    ``getbundle`` command; extensions may wrap this function to inject
    additional arguments.  The core implementation is intentionally a no-op.
    """
1704 1701
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Picks the best transfer command the remote advertises (``getbundle``,
    ``changegroup`` or ``changegroupsubset``) and applies the resulting
    bundle inside the pull transaction.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        # nothing to transfer: report and record a neutral result
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        # full pull: nothing in common with the remote yet
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # legacy remote without getbundle: fetch everything we are missing
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()

    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        # partial pull from a remote that only knows changegroupsubset
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()

    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1750 1747
def _pullphase(pullop):
    """Fetch phase information from the remote and apply it locally."""
    # A previous step (e.g. a bundle2 'phases' part) may already have
    # handled phases; in that case there is nothing left to do.
    if 'phases' not in pullop.stepsdone:
        remotephases = listkeys(pullop.remote, 'phases')
        _pullapplyphases(pullop, remotephases)
1757 1754
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the pushkey-style mapping obtained from the remote;
    a ``publishing`` entry marks a publishing server.
    """
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        # a transaction is only opened if there is actual phase movement
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1792 1789
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    localrepo = pullop.repo
    # delegate the actual reconciliation to the bookmarks module
    bookmod.updatefromremote(localrepo.ui, localrepo,
                             pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1804 1801
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        # markers are shipped base85-encoded under 'dumpN' pushkey entries;
        # 'dump0' acts as the sentinel for "there is something to pull"
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            # obsstore changed: drop caches derived from it
            pullop.repo.invalidatevolatilesets()
    return tr
1832 1829
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    # advertise bundle2 itself plus the repo's encoded bundle2 capabilities
    return {'HG20', 'bundle2=' + urlreq.quote(capsblob)}
1839 1836
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def register(func):
        # each step name may only be registered once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is not None:
            getbundle2partsorder.insert(idx, stepname)
        else:
            getbundle2partsorder.append(stepname)
        return func
    return register
1866 1863
def bundle2requested(bundlecaps):
    """Report whether the advertised capabilities ask for a bundle2 stream."""
    if bundlecaps is None:
        return False
    # any 'HG2x' capability marks a bundle2-aware client
    return any(cap.startswith('HG2') for cap in bundlecaps)
1871 1868
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            # bundle10 can only carry a changegroup; anything else is invalid
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    # decode the client's bundle2 capabilities from the 'bundle2=' blob
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # run every registered part generator, in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
1918 1915
@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    """add a 'stream2' part to the requested bundle

    Delegates entirely to ``bundle2.addpartbundlestream2``, which is
    presumably responsible for deciding, from the keyword arguments,
    whether a streaming-clone part is emitted at all.
    """
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
1922 1919
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get(r'cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions: # 3.1 and 3.2 ship with an empty value
            # keep only versions both sides understand, then pick the newest
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        if outgoing.missing:
            cgstream = changegroup.makestream(repo, outgoing, version, source,
                                              bundlecaps=bundlecaps)

    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        # advisory changeset count, used for progress reporting
        part.addparam('nbchanges', '%d' % len(outgoing.missing),
                      mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')
1951 1948
@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    # only emit a part when there is actual bookmark data to send
    payload = bookmod.binaryencode(bookmod.listbinbookmarks(repo))
    if payload:
        bundler.newpart('bookmarks', data=payload)
1964 1961
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one 'listkeys' part per requested pushkey namespace
    for namespace in kwargs.get(r'listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1975 1972
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get(r'obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # restrict to markers relevant to the ancestors of the requested heads
    subset = [c.node() for c in repo.set('::%ln', heads)]
    markers = sorted(repo.obsstore.relevantmarkers(subset))
    bundle2.buildobsmarkerspart(bundler, markers)
1987 1984
@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle

    Emits a 'phase-heads' part describing, per phase, the heads the client
    should move. Raises ValueError when the client did not advertise the
    binary 'heads' phases capability.
    """
    if kwargs.get(r'phases', False):
        # Use an empty default so that a client lacking the 'phases'
        # capability entirely hits the explicit ValueError below instead of
        # a TypeError on `'heads' in None`.
        if 'heads' not in b2caps.get('phases', ()):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            # everything a publishing server exchanges is public
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phase for now)
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)
2034 2031
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Skip unless changesets are being exchanged and the client supports
    # the part.
    if not kwargs.get(r'cg', True) or 'hgtagsfnodes' not in b2caps:
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2054 2051
@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Send only when changesets are exchanged and the client understands
    # the part.
    if not (kwargs.get(r'cg', True) and 'rev-branch-cache' in b2caps):
        return
    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2076 2073
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    # acceptable: explicit force, an exact head match, or a matching digest
    unchanged = (their_heads == ['force'] or their_heads == heads or
                 their_heads == ['hashed', heads_hash])
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
2090 2087
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call wil be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    # lazily acquire wlock, lock and the transaction; the
                    # handles are stashed in lockandtr so the outer finally
                    # can release them
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    # keep the reply bundle even if processing failed, so
                    # output can still be forwarded to the client
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # tag the exception so upper layers know a bundle2 push was
                # in flight, and salvage any reply output already produced
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        # release in reverse acquisition order: tr, lock, wlock
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
2164 2161
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible.

    Only attempted for a full clone (empty local repo, no explicit heads)
    against a remote advertising the 'clonebundles' capability.
    """

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand('clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    # best-ranked entry goes first; only the first URL is attempted
    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
2231 2228
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        # remaining fields are urlquoted KEY=VALUE attribute pairs
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs['COMPRESSION'] = bundlespec.compression
                    attrs['VERSION'] = bundlespec.version
                except error.InvalidBundleSpecification:
                    # malformed spec: keep the raw attribute, skip the split
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m
2266 2263
def isstreamclonespec(bundlespec):
    """Report whether a parsed bundlespec describes a stream clone."""
    # Stream clone v1
    if bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1':
        return True

    # Stream clone v2
    return bool(bundlespec.wirecompression == 'UN'
                and bundlespec.wireversion == '02'
                and bundlespec.contentopts.get('streamv2'))
2279 2276
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                # strict=True: reject specs with unknown components
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries
2327 2324
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        # walk the preferences in order; the first one that discriminates
        # the two entries decides the ordering
        for prefkey, prefvalue in self.prefers:
            ours = self.value.get(prefkey)
            theirs = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if ours is not None and theirs is None and ours == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if theirs is not None and ours is None and theirs == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if ours is None or theirs is None:
                continue

            # Same values should fall back to next attribute.
            if ours == theirs:
                continue

            # Exact matches come first.
            if ours == prefvalue:
                return -1
            if theirs == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0
2391 2388
def sortclonebundleentries(ui, entries):
    """Order manifest entries by the user's ui.clonebundleprefers config."""
    rawprefers = ui.configlist('ui', 'clonebundleprefers')
    if not rawprefers:
        # no preferences: preserve the manifest order
        return list(entries)

    prefers = [pref.split('=', 1) for pref in rawprefers]

    # decorate with a rich-comparison wrapper, sort, then undecorate
    decorated = sorted(clonebundleentry(entry, prefers) for entry in entries)
    return [wrapper.value for wrapper in decorated]
2401 2398
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True on success; HTTP/URL errors are reported to the user and
    result in False so the caller can fall back to a regular clone.
    """
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                # stream clone bundles bypass the changegroup machinery
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

        return False
@@ -1,1367 +1,1379 b''
1 1 #require killdaemons
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [extensions]
5 5 > phasereport=$TESTDIR/testlib/ext-phase-report.py
6 6 > EOF
7 7
8 8 $ hgph() { hg log -G --template "{rev} {phase} {desc} - {node|short}\n" $*; }
9 9
10 10 $ mkcommit() {
11 11 > echo "$1" > "$1"
12 12 > hg add "$1"
13 13 > message="$1"
14 14 > shift
15 15 > hg ci -m "$message" $*
16 16 > }
17 17
18 18 $ hg init alpha
19 19 $ cd alpha
20 20 $ mkcommit a-A
21 21 test-debug-phase: new rev 0: x -> 1
22 22 $ mkcommit a-B
23 23 test-debug-phase: new rev 1: x -> 1
24 24 $ mkcommit a-C
25 25 test-debug-phase: new rev 2: x -> 1
26 26 $ mkcommit a-D
27 27 test-debug-phase: new rev 3: x -> 1
28 28 $ hgph
29 29 @ 3 draft a-D - b555f63b6063
30 30 |
31 31 o 2 draft a-C - 54acac6f23ab
32 32 |
33 33 o 1 draft a-B - 548a3d25dbf0
34 34 |
35 35 o 0 draft a-A - 054250a37db4
36 36
37 37
38 38 $ hg init ../beta
39 39 $ hg push -r 1 ../beta
40 40 pushing to ../beta
41 41 searching for changes
42 42 adding changesets
43 43 adding manifests
44 44 adding file changes
45 45 added 2 changesets with 2 changes to 2 files
46 46 test-debug-phase: new rev 0: x -> 0
47 47 test-debug-phase: new rev 1: x -> 0
48 48 test-debug-phase: move rev 0: 1 -> 0
49 49 test-debug-phase: move rev 1: 1 -> 0
50 50 $ hgph
51 51 @ 3 draft a-D - b555f63b6063
52 52 |
53 53 o 2 draft a-C - 54acac6f23ab
54 54 |
55 55 o 1 public a-B - 548a3d25dbf0
56 56 |
57 57 o 0 public a-A - 054250a37db4
58 58
59 59
60 60 $ cd ../beta
61 61 $ hgph
62 62 o 1 public a-B - 548a3d25dbf0
63 63 |
64 64 o 0 public a-A - 054250a37db4
65 65
66 66 $ hg up -q
67 67 $ mkcommit b-A
68 68 test-debug-phase: new rev 2: x -> 1
69 69 $ hgph
70 70 @ 2 draft b-A - f54f1bb90ff3
71 71 |
72 72 o 1 public a-B - 548a3d25dbf0
73 73 |
74 74 o 0 public a-A - 054250a37db4
75 75
76 76 $ hg pull ../alpha
77 77 pulling from ../alpha
78 78 searching for changes
79 79 adding changesets
80 80 adding manifests
81 81 adding file changes
82 82 added 2 changesets with 2 changes to 2 files (+1 heads)
83 83 new changesets 54acac6f23ab:b555f63b6063
84 84 test-debug-phase: new rev 3: x -> 0
85 85 test-debug-phase: new rev 4: x -> 0
86 86 (run 'hg heads' to see heads, 'hg merge' to merge)
87 87 $ hgph
88 88 o 4 public a-D - b555f63b6063
89 89 |
90 90 o 3 public a-C - 54acac6f23ab
91 91 |
92 92 | @ 2 draft b-A - f54f1bb90ff3
93 93 |/
94 94 o 1 public a-B - 548a3d25dbf0
95 95 |
96 96 o 0 public a-A - 054250a37db4
97 97
98 98
99 99 pull did not update ../alpha state.
100 100 push from alpha to beta should update phase even if nothing is transferred
101 101
102 102 $ cd ../alpha
103 103 $ hgph # not updated by remote pull
104 104 @ 3 draft a-D - b555f63b6063
105 105 |
106 106 o 2 draft a-C - 54acac6f23ab
107 107 |
108 108 o 1 public a-B - 548a3d25dbf0
109 109 |
110 110 o 0 public a-A - 054250a37db4
111 111
112 112 $ hg push -r 2 ../beta
113 113 pushing to ../beta
114 114 searching for changes
115 115 no changes found
116 116 test-debug-phase: move rev 2: 1 -> 0
117 117 [1]
118 118 $ hgph
119 119 @ 3 draft a-D - b555f63b6063
120 120 |
121 121 o 2 public a-C - 54acac6f23ab
122 122 |
123 123 o 1 public a-B - 548a3d25dbf0
124 124 |
125 125 o 0 public a-A - 054250a37db4
126 126
127 127 $ hg push ../beta
128 128 pushing to ../beta
129 129 searching for changes
130 130 no changes found
131 131 test-debug-phase: move rev 3: 1 -> 0
132 132 [1]
133 133 $ hgph
134 134 @ 3 public a-D - b555f63b6063
135 135 |
136 136 o 2 public a-C - 54acac6f23ab
137 137 |
138 138 o 1 public a-B - 548a3d25dbf0
139 139 |
140 140 o 0 public a-A - 054250a37db4
141 141
142 142
143 143 update must update phase of common changeset too
144 144
145 145 $ hg pull ../beta # getting b-A
146 146 pulling from ../beta
147 147 searching for changes
148 148 adding changesets
149 149 adding manifests
150 150 adding file changes
151 151 added 1 changesets with 1 changes to 1 files (+1 heads)
152 152 new changesets f54f1bb90ff3
153 153 test-debug-phase: new rev 4: x -> 0
154 154 (run 'hg heads' to see heads, 'hg merge' to merge)
155 155
156 156 $ cd ../beta
157 157 $ hgph # not updated by remote pull
158 158 o 4 public a-D - b555f63b6063
159 159 |
160 160 o 3 public a-C - 54acac6f23ab
161 161 |
162 162 | @ 2 draft b-A - f54f1bb90ff3
163 163 |/
164 164 o 1 public a-B - 548a3d25dbf0
165 165 |
166 166 o 0 public a-A - 054250a37db4
167 167
168 168 $ hg pull ../alpha
169 169 pulling from ../alpha
170 170 searching for changes
171 171 no changes found
172 172 test-debug-phase: move rev 2: 1 -> 0
173 173 $ hgph
174 174 o 4 public a-D - b555f63b6063
175 175 |
176 176 o 3 public a-C - 54acac6f23ab
177 177 |
178 178 | @ 2 public b-A - f54f1bb90ff3
179 179 |/
180 180 o 1 public a-B - 548a3d25dbf0
181 181 |
182 182 o 0 public a-A - 054250a37db4
183 183
184 184
185 185 Publish configuration option
186 186 ----------------------------
187 187
188 188 Pull
189 189 ````
190 190
191 191 changegroup are added without phase movement
192 192
193 193 $ hg bundle -a ../base.bundle
194 194 5 changesets found
195 195 $ cd ..
196 196 $ hg init mu
197 197 $ cd mu
198 198 $ cat > .hg/hgrc << EOF
199 199 > [phases]
200 200 > publish=0
201 201 > EOF
202 202 $ hg unbundle ../base.bundle
203 203 adding changesets
204 204 adding manifests
205 205 adding file changes
206 206 added 5 changesets with 5 changes to 5 files (+1 heads)
207 207 new changesets 054250a37db4:b555f63b6063
208 208 test-debug-phase: new rev 0: x -> 1
209 209 test-debug-phase: new rev 1: x -> 1
210 210 test-debug-phase: new rev 2: x -> 1
211 211 test-debug-phase: new rev 3: x -> 1
212 212 test-debug-phase: new rev 4: x -> 1
213 213 (run 'hg heads' to see heads, 'hg merge' to merge)
214 214 $ hgph
215 215 o 4 draft a-D - b555f63b6063
216 216 |
217 217 o 3 draft a-C - 54acac6f23ab
218 218 |
219 219 | o 2 draft b-A - f54f1bb90ff3
220 220 |/
221 221 o 1 draft a-B - 548a3d25dbf0
222 222 |
223 223 o 0 draft a-A - 054250a37db4
224 224
225 225 $ cd ..
226 226
227 227 Pulling from publish=False to publish=False does not move boundary.
228 228
229 229 $ hg init nu
230 230 $ cd nu
231 231 $ cat > .hg/hgrc << EOF
232 232 > [phases]
233 233 > publish=0
234 234 > EOF
235 235 $ hg pull ../mu -r 54acac6f23ab
236 236 pulling from ../mu
237 237 adding changesets
238 238 adding manifests
239 239 adding file changes
240 240 added 3 changesets with 3 changes to 3 files
241 241 new changesets 054250a37db4:54acac6f23ab
242 242 test-debug-phase: new rev 0: x -> 1
243 243 test-debug-phase: new rev 1: x -> 1
244 244 test-debug-phase: new rev 2: x -> 1
245 245 (run 'hg update' to get a working copy)
246 246 $ hgph
247 247 o 2 draft a-C - 54acac6f23ab
248 248 |
249 249 o 1 draft a-B - 548a3d25dbf0
250 250 |
251 251 o 0 draft a-A - 054250a37db4
252 252
253 253
254 254 Even for common
255 255
256 256 $ hg pull ../mu -r f54f1bb90ff3
257 257 pulling from ../mu
258 258 searching for changes
259 259 adding changesets
260 260 adding manifests
261 261 adding file changes
262 262 added 1 changesets with 1 changes to 1 files (+1 heads)
263 263 new changesets f54f1bb90ff3
264 264 test-debug-phase: new rev 3: x -> 1
265 265 (run 'hg heads' to see heads, 'hg merge' to merge)
266 266 $ hgph
267 267 o 3 draft b-A - f54f1bb90ff3
268 268 |
269 269 | o 2 draft a-C - 54acac6f23ab
270 270 |/
271 271 o 1 draft a-B - 548a3d25dbf0
272 272 |
273 273 o 0 draft a-A - 054250a37db4
274 274
275 275
276 276
277 277 Pulling from Publish=True to Publish=False move boundary in common set.
278 278 we are in nu
279 279
280 280 $ hg pull ../alpha -r b555f63b6063
281 281 pulling from ../alpha
282 282 searching for changes
283 283 adding changesets
284 284 adding manifests
285 285 adding file changes
286 286 added 1 changesets with 1 changes to 1 files
287 287 new changesets b555f63b6063
288 288 test-debug-phase: move rev 0: 1 -> 0
289 289 test-debug-phase: move rev 1: 1 -> 0
290 290 test-debug-phase: move rev 2: 1 -> 0
291 291 test-debug-phase: new rev 4: x -> 0
292 292 (run 'hg update' to get a working copy)
293 293 $ hgph # f54f1bb90ff3 stay draft, not ancestor of -r
294 294 o 4 public a-D - b555f63b6063
295 295 |
296 296 | o 3 draft b-A - f54f1bb90ff3
297 297 | |
298 298 o | 2 public a-C - 54acac6f23ab
299 299 |/
300 300 o 1 public a-B - 548a3d25dbf0
301 301 |
302 302 o 0 public a-A - 054250a37db4
303 303
304 304
305 305 pulling from Publish=False to publish=False with some public
306 306
307 307 $ hg up -q f54f1bb90ff3
308 308 $ mkcommit n-A
309 309 test-debug-phase: new rev 5: x -> 1
310 310 $ mkcommit n-B
311 311 test-debug-phase: new rev 6: x -> 1
312 312 $ hgph
313 313 @ 6 draft n-B - 145e75495359
314 314 |
315 315 o 5 draft n-A - d6bcb4f74035
316 316 |
317 317 | o 4 public a-D - b555f63b6063
318 318 | |
319 319 o | 3 draft b-A - f54f1bb90ff3
320 320 | |
321 321 | o 2 public a-C - 54acac6f23ab
322 322 |/
323 323 o 1 public a-B - 548a3d25dbf0
324 324 |
325 325 o 0 public a-A - 054250a37db4
326 326
327 327 $ cd ../mu
328 328 $ hg pull ../nu
329 329 pulling from ../nu
330 330 searching for changes
331 331 adding changesets
332 332 adding manifests
333 333 adding file changes
334 334 added 2 changesets with 2 changes to 2 files
335 335 new changesets d6bcb4f74035:145e75495359
336 336 test-debug-phase: move rev 0: 1 -> 0
337 337 test-debug-phase: move rev 1: 1 -> 0
338 338 test-debug-phase: move rev 3: 1 -> 0
339 339 test-debug-phase: move rev 4: 1 -> 0
340 340 test-debug-phase: new rev 5: x -> 1
341 341 test-debug-phase: new rev 6: x -> 1
342 342 (run 'hg update' to get a working copy)
343 343 $ hgph
344 344 o 6 draft n-B - 145e75495359
345 345 |
346 346 o 5 draft n-A - d6bcb4f74035
347 347 |
348 348 | o 4 public a-D - b555f63b6063
349 349 | |
350 350 | o 3 public a-C - 54acac6f23ab
351 351 | |
352 352 o | 2 draft b-A - f54f1bb90ff3
353 353 |/
354 354 o 1 public a-B - 548a3d25dbf0
355 355 |
356 356 o 0 public a-A - 054250a37db4
357 357
358 358 $ cd ..
359 359
360 360 pulling into publish=True
361 361
362 362 $ cd alpha
363 363 $ hgph
364 364 o 4 public b-A - f54f1bb90ff3
365 365 |
366 366 | @ 3 public a-D - b555f63b6063
367 367 | |
368 368 | o 2 public a-C - 54acac6f23ab
369 369 |/
370 370 o 1 public a-B - 548a3d25dbf0
371 371 |
372 372 o 0 public a-A - 054250a37db4
373 373
374 374 $ hg pull ../mu
375 375 pulling from ../mu
376 376 searching for changes
377 377 adding changesets
378 378 adding manifests
379 379 adding file changes
380 380 added 2 changesets with 2 changes to 2 files
381 381 new changesets d6bcb4f74035:145e75495359
382 382 test-debug-phase: new rev 5: x -> 1
383 383 test-debug-phase: new rev 6: x -> 1
384 384 (run 'hg update' to get a working copy)
385 385 $ hgph
386 386 o 6 draft n-B - 145e75495359
387 387 |
388 388 o 5 draft n-A - d6bcb4f74035
389 389 |
390 390 o 4 public b-A - f54f1bb90ff3
391 391 |
392 392 | @ 3 public a-D - b555f63b6063
393 393 | |
394 394 | o 2 public a-C - 54acac6f23ab
395 395 |/
396 396 o 1 public a-B - 548a3d25dbf0
397 397 |
398 398 o 0 public a-A - 054250a37db4
399 399
400 400 $ cd ..
401 401
402 402 pulling back into original repo
403 403
404 404 $ cd nu
405 405 $ hg pull ../alpha
406 406 pulling from ../alpha
407 407 searching for changes
408 408 no changes found
409 409 test-debug-phase: move rev 3: 1 -> 0
410 410 test-debug-phase: move rev 5: 1 -> 0
411 411 test-debug-phase: move rev 6: 1 -> 0
412 412 $ hgph
413 413 @ 6 public n-B - 145e75495359
414 414 |
415 415 o 5 public n-A - d6bcb4f74035
416 416 |
417 417 | o 4 public a-D - b555f63b6063
418 418 | |
419 419 o | 3 public b-A - f54f1bb90ff3
420 420 | |
421 421 | o 2 public a-C - 54acac6f23ab
422 422 |/
423 423 o 1 public a-B - 548a3d25dbf0
424 424 |
425 425 o 0 public a-A - 054250a37db4
426 426
427 427
428 428 Push
429 429 ````
430 430
431 431 (inserted)
432 432
433 433 Test that phases are pushed even when there is nothing to push
434 434 (this might be tested later, but it is very convenient not to alter the tests too much)
435 435
436 436 Push back to alpha
437 437
438 438 $ hg push ../alpha # from nu
439 439 pushing to ../alpha
440 440 searching for changes
441 441 no changes found
442 442 test-debug-phase: move rev 5: 1 -> 0
443 443 test-debug-phase: move rev 6: 1 -> 0
444 444 [1]
445 445 $ cd ..
446 446 $ cd alpha
447 447 $ hgph
448 448 o 6 public n-B - 145e75495359
449 449 |
450 450 o 5 public n-A - d6bcb4f74035
451 451 |
452 452 o 4 public b-A - f54f1bb90ff3
453 453 |
454 454 | @ 3 public a-D - b555f63b6063
455 455 | |
456 456 | o 2 public a-C - 54acac6f23ab
457 457 |/
458 458 o 1 public a-B - 548a3d25dbf0
459 459 |
460 460 o 0 public a-A - 054250a37db4
461 461
462 462
463 463 (end insertion)
464 464
465 465
466 466 initial setup
467 467
468 468 $ hg log -G # of alpha
469 469 o changeset: 6:145e75495359
470 470 | tag: tip
471 471 | user: test
472 472 | date: Thu Jan 01 00:00:00 1970 +0000
473 473 | summary: n-B
474 474 |
475 475 o changeset: 5:d6bcb4f74035
476 476 | user: test
477 477 | date: Thu Jan 01 00:00:00 1970 +0000
478 478 | summary: n-A
479 479 |
480 480 o changeset: 4:f54f1bb90ff3
481 481 | parent: 1:548a3d25dbf0
482 482 | user: test
483 483 | date: Thu Jan 01 00:00:00 1970 +0000
484 484 | summary: b-A
485 485 |
486 486 | @ changeset: 3:b555f63b6063
487 487 | | user: test
488 488 | | date: Thu Jan 01 00:00:00 1970 +0000
489 489 | | summary: a-D
490 490 | |
491 491 | o changeset: 2:54acac6f23ab
492 492 |/ user: test
493 493 | date: Thu Jan 01 00:00:00 1970 +0000
494 494 | summary: a-C
495 495 |
496 496 o changeset: 1:548a3d25dbf0
497 497 | user: test
498 498 | date: Thu Jan 01 00:00:00 1970 +0000
499 499 | summary: a-B
500 500 |
501 501 o changeset: 0:054250a37db4
502 502 user: test
503 503 date: Thu Jan 01 00:00:00 1970 +0000
504 504 summary: a-A
505 505
506 506 $ mkcommit a-E
507 507 test-debug-phase: new rev 7: x -> 1
508 508 $ mkcommit a-F
509 509 test-debug-phase: new rev 8: x -> 1
510 510 $ mkcommit a-G
511 511 test-debug-phase: new rev 9: x -> 1
512 512 $ hg up d6bcb4f74035 -q
513 513 $ mkcommit a-H
514 514 test-debug-phase: new rev 10: x -> 1
515 515 created new head
516 516 $ hgph
517 517 @ 10 draft a-H - 967b449fbc94
518 518 |
519 519 | o 9 draft a-G - 3e27b6f1eee1
520 520 | |
521 521 | o 8 draft a-F - b740e3e5c05d
522 522 | |
523 523 | o 7 draft a-E - e9f537e46dea
524 524 | |
525 525 +---o 6 public n-B - 145e75495359
526 526 | |
527 527 o | 5 public n-A - d6bcb4f74035
528 528 | |
529 529 o | 4 public b-A - f54f1bb90ff3
530 530 | |
531 531 | o 3 public a-D - b555f63b6063
532 532 | |
533 533 | o 2 public a-C - 54acac6f23ab
534 534 |/
535 535 o 1 public a-B - 548a3d25dbf0
536 536 |
537 537 o 0 public a-A - 054250a37db4
538 538
539 539
540 540 Pulling from bundle does not alter phases of changeset not present in the bundle
541 541
542 542 #if repobundlerepo
543 543 $ hg bundle --base 1 -r 6 -r 3 ../partial-bundle.hg
544 544 5 changesets found
545 545 $ hg pull ../partial-bundle.hg
546 546 pulling from ../partial-bundle.hg
547 547 searching for changes
548 548 no changes found
549 549 $ hgph
550 550 @ 10 draft a-H - 967b449fbc94
551 551 |
552 552 | o 9 draft a-G - 3e27b6f1eee1
553 553 | |
554 554 | o 8 draft a-F - b740e3e5c05d
555 555 | |
556 556 | o 7 draft a-E - e9f537e46dea
557 557 | |
558 558 +---o 6 public n-B - 145e75495359
559 559 | |
560 560 o | 5 public n-A - d6bcb4f74035
561 561 | |
562 562 o | 4 public b-A - f54f1bb90ff3
563 563 | |
564 564 | o 3 public a-D - b555f63b6063
565 565 | |
566 566 | o 2 public a-C - 54acac6f23ab
567 567 |/
568 568 o 1 public a-B - 548a3d25dbf0
569 569 |
570 570 o 0 public a-A - 054250a37db4
571 571
572 572 #endif
573 573
574 574 Pushing to Publish=False (unknown changeset)
575 575
576 576 $ hg push ../mu -r b740e3e5c05d # a-F
577 577 pushing to ../mu
578 578 searching for changes
579 579 adding changesets
580 580 adding manifests
581 581 adding file changes
582 582 added 2 changesets with 2 changes to 2 files
583 583 test-debug-phase: new rev 7: x -> 1
584 584 test-debug-phase: new rev 8: x -> 1
585 585 $ hgph
586 586 @ 10 draft a-H - 967b449fbc94
587 587 |
588 588 | o 9 draft a-G - 3e27b6f1eee1
589 589 | |
590 590 | o 8 draft a-F - b740e3e5c05d
591 591 | |
592 592 | o 7 draft a-E - e9f537e46dea
593 593 | |
594 594 +---o 6 public n-B - 145e75495359
595 595 | |
596 596 o | 5 public n-A - d6bcb4f74035
597 597 | |
598 598 o | 4 public b-A - f54f1bb90ff3
599 599 | |
600 600 | o 3 public a-D - b555f63b6063
601 601 | |
602 602 | o 2 public a-C - 54acac6f23ab
603 603 |/
604 604 o 1 public a-B - 548a3d25dbf0
605 605 |
606 606 o 0 public a-A - 054250a37db4
607 607
608 608
609 609 $ cd ../mu
610 610 $ hgph # again f54f1bb90ff3, d6bcb4f74035 and 145e75495359 stay draft,
611 611 > # not ancestor of -r
612 612 o 8 draft a-F - b740e3e5c05d
613 613 |
614 614 o 7 draft a-E - e9f537e46dea
615 615 |
616 616 | o 6 draft n-B - 145e75495359
617 617 | |
618 618 | o 5 draft n-A - d6bcb4f74035
619 619 | |
620 620 o | 4 public a-D - b555f63b6063
621 621 | |
622 622 o | 3 public a-C - 54acac6f23ab
623 623 | |
624 624 | o 2 draft b-A - f54f1bb90ff3
625 625 |/
626 626 o 1 public a-B - 548a3d25dbf0
627 627 |
628 628 o 0 public a-A - 054250a37db4
629 629
630 630
631 631 Pushing to Publish=True (unknown changeset)
632 632
633 633 $ hg push ../beta -r b740e3e5c05d
634 634 pushing to ../beta
635 635 searching for changes
636 636 adding changesets
637 637 adding manifests
638 638 adding file changes
639 639 added 2 changesets with 2 changes to 2 files
640 640 test-debug-phase: new rev 5: x -> 0
641 641 test-debug-phase: new rev 6: x -> 0
642 642 test-debug-phase: move rev 7: 1 -> 0
643 643 test-debug-phase: move rev 8: 1 -> 0
644 644 $ hgph # again f54f1bb90ff3, d6bcb4f74035 and 145e75495359 stay draft,
645 645 > # not ancestor of -r
646 646 o 8 public a-F - b740e3e5c05d
647 647 |
648 648 o 7 public a-E - e9f537e46dea
649 649 |
650 650 | o 6 draft n-B - 145e75495359
651 651 | |
652 652 | o 5 draft n-A - d6bcb4f74035
653 653 | |
654 654 o | 4 public a-D - b555f63b6063
655 655 | |
656 656 o | 3 public a-C - 54acac6f23ab
657 657 | |
658 658 | o 2 draft b-A - f54f1bb90ff3
659 659 |/
660 660 o 1 public a-B - 548a3d25dbf0
661 661 |
662 662 o 0 public a-A - 054250a37db4
663 663
664 664
665 665 Pushing to Publish=True (common changeset)
666 666
667 667 $ cd ../beta
668 668 $ hg push ../alpha
669 669 pushing to ../alpha
670 670 searching for changes
671 671 no changes found
672 672 test-debug-phase: move rev 7: 1 -> 0
673 673 test-debug-phase: move rev 8: 1 -> 0
674 674 [1]
675 675 $ hgph
676 676 o 6 public a-F - b740e3e5c05d
677 677 |
678 678 o 5 public a-E - e9f537e46dea
679 679 |
680 680 o 4 public a-D - b555f63b6063
681 681 |
682 682 o 3 public a-C - 54acac6f23ab
683 683 |
684 684 | @ 2 public b-A - f54f1bb90ff3
685 685 |/
686 686 o 1 public a-B - 548a3d25dbf0
687 687 |
688 688 o 0 public a-A - 054250a37db4
689 689
690 690 $ cd ../alpha
691 691 $ hgph
692 692 @ 10 draft a-H - 967b449fbc94
693 693 |
694 694 | o 9 draft a-G - 3e27b6f1eee1
695 695 | |
696 696 | o 8 public a-F - b740e3e5c05d
697 697 | |
698 698 | o 7 public a-E - e9f537e46dea
699 699 | |
700 700 +---o 6 public n-B - 145e75495359
701 701 | |
702 702 o | 5 public n-A - d6bcb4f74035
703 703 | |
704 704 o | 4 public b-A - f54f1bb90ff3
705 705 | |
706 706 | o 3 public a-D - b555f63b6063
707 707 | |
708 708 | o 2 public a-C - 54acac6f23ab
709 709 |/
710 710 o 1 public a-B - 548a3d25dbf0
711 711 |
712 712 o 0 public a-A - 054250a37db4
713 713
714 714
715 715 Pushing to Publish=False (common changeset that change phase + unknown one)
716 716
717 717 $ hg push ../mu -r 967b449fbc94 -f
718 718 pushing to ../mu
719 719 searching for changes
720 720 adding changesets
721 721 adding manifests
722 722 adding file changes
723 723 added 1 changesets with 1 changes to 1 files (+1 heads)
724 724 test-debug-phase: move rev 2: 1 -> 0
725 725 test-debug-phase: move rev 5: 1 -> 0
726 726 test-debug-phase: new rev 9: x -> 1
727 727 $ hgph
728 728 @ 10 draft a-H - 967b449fbc94
729 729 |
730 730 | o 9 draft a-G - 3e27b6f1eee1
731 731 | |
732 732 | o 8 public a-F - b740e3e5c05d
733 733 | |
734 734 | o 7 public a-E - e9f537e46dea
735 735 | |
736 736 +---o 6 public n-B - 145e75495359
737 737 | |
738 738 o | 5 public n-A - d6bcb4f74035
739 739 | |
740 740 o | 4 public b-A - f54f1bb90ff3
741 741 | |
742 742 | o 3 public a-D - b555f63b6063
743 743 | |
744 744 | o 2 public a-C - 54acac6f23ab
745 745 |/
746 746 o 1 public a-B - 548a3d25dbf0
747 747 |
748 748 o 0 public a-A - 054250a37db4
749 749
750 750 $ cd ../mu
751 751 $ hgph # d6bcb4f74035 should have changed phase
752 752 > # 145e75495359 is still draft. not ancestor of -r
753 753 o 9 draft a-H - 967b449fbc94
754 754 |
755 755 | o 8 public a-F - b740e3e5c05d
756 756 | |
757 757 | o 7 public a-E - e9f537e46dea
758 758 | |
759 759 +---o 6 draft n-B - 145e75495359
760 760 | |
761 761 o | 5 public n-A - d6bcb4f74035
762 762 | |
763 763 | o 4 public a-D - b555f63b6063
764 764 | |
765 765 | o 3 public a-C - 54acac6f23ab
766 766 | |
767 767 o | 2 public b-A - f54f1bb90ff3
768 768 |/
769 769 o 1 public a-B - 548a3d25dbf0
770 770 |
771 771 o 0 public a-A - 054250a37db4
772 772
773 773
774 774
775 775 Pushing to Publish=True (common changeset from publish=False)
776 776
777 777 (in mu)
778 778 $ hg push ../alpha
779 779 pushing to ../alpha
780 780 searching for changes
781 781 no changes found
782 782 test-debug-phase: move rev 10: 1 -> 0
783 783 test-debug-phase: move rev 6: 1 -> 0
784 784 test-debug-phase: move rev 9: 1 -> 0
785 785 [1]
786 786 $ hgph
787 787 o 9 public a-H - 967b449fbc94
788 788 |
789 789 | o 8 public a-F - b740e3e5c05d
790 790 | |
791 791 | o 7 public a-E - e9f537e46dea
792 792 | |
793 793 +---o 6 public n-B - 145e75495359
794 794 | |
795 795 o | 5 public n-A - d6bcb4f74035
796 796 | |
797 797 | o 4 public a-D - b555f63b6063
798 798 | |
799 799 | o 3 public a-C - 54acac6f23ab
800 800 | |
801 801 o | 2 public b-A - f54f1bb90ff3
802 802 |/
803 803 o 1 public a-B - 548a3d25dbf0
804 804 |
805 805 o 0 public a-A - 054250a37db4
806 806
807 807 $ hgph -R ../alpha # a-H should have been synced to 0
808 808 @ 10 public a-H - 967b449fbc94
809 809 |
810 810 | o 9 draft a-G - 3e27b6f1eee1
811 811 | |
812 812 | o 8 public a-F - b740e3e5c05d
813 813 | |
814 814 | o 7 public a-E - e9f537e46dea
815 815 | |
816 816 +---o 6 public n-B - 145e75495359
817 817 | |
818 818 o | 5 public n-A - d6bcb4f74035
819 819 | |
820 820 o | 4 public b-A - f54f1bb90ff3
821 821 | |
822 822 | o 3 public a-D - b555f63b6063
823 823 | |
824 824 | o 2 public a-C - 54acac6f23ab
825 825 |/
826 826 o 1 public a-B - 548a3d25dbf0
827 827 |
828 828 o 0 public a-A - 054250a37db4
829 829
830 830
831 831
832 832 Bare push with next changeset and common changeset needing sync (issue3575)
833 833
834 834 (reset some stat on remote repo to avoid confusing other tests)
835 835
836 836 $ hg -R ../alpha --config extensions.strip= strip --no-backup 967b449fbc94
837 837 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
838 838 $ hg phase --force --draft b740e3e5c05d 967b449fbc94
839 839 test-debug-phase: move rev 8: 0 -> 1
840 840 test-debug-phase: move rev 9: 0 -> 1
841 841 $ hg push -fv ../alpha
842 842 pushing to ../alpha
843 843 searching for changes
844 844 1 changesets found
845 845 uncompressed size of bundle content:
846 846 178 (changelog)
847 847 165 (manifests)
848 848 131 a-H
849 849 adding changesets
850 850 adding manifests
851 851 adding file changes
852 852 added 1 changesets with 1 changes to 1 files (+1 heads)
853 853 test-debug-phase: new rev 10: x -> 0
854 854 test-debug-phase: move rev 8: 1 -> 0
855 855 test-debug-phase: move rev 9: 1 -> 0
856 856 $ hgph
857 857 o 9 public a-H - 967b449fbc94
858 858 |
859 859 | o 8 public a-F - b740e3e5c05d
860 860 | |
861 861 | o 7 public a-E - e9f537e46dea
862 862 | |
863 863 +---o 6 public n-B - 145e75495359
864 864 | |
865 865 o | 5 public n-A - d6bcb4f74035
866 866 | |
867 867 | o 4 public a-D - b555f63b6063
868 868 | |
869 869 | o 3 public a-C - 54acac6f23ab
870 870 | |
871 871 o | 2 public b-A - f54f1bb90ff3
872 872 |/
873 873 o 1 public a-B - 548a3d25dbf0
874 874 |
875 875 o 0 public a-A - 054250a37db4
876 876
877 877
878 878 $ hg -R ../alpha update 967b449fbc94 #for latter test consistency
879 879 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
880 880 $ hgph -R ../alpha
881 881 @ 10 public a-H - 967b449fbc94
882 882 |
883 883 | o 9 draft a-G - 3e27b6f1eee1
884 884 | |
885 885 | o 8 public a-F - b740e3e5c05d
886 886 | |
887 887 | o 7 public a-E - e9f537e46dea
888 888 | |
889 889 +---o 6 public n-B - 145e75495359
890 890 | |
891 891 o | 5 public n-A - d6bcb4f74035
892 892 | |
893 893 o | 4 public b-A - f54f1bb90ff3
894 894 | |
895 895 | o 3 public a-D - b555f63b6063
896 896 | |
897 897 | o 2 public a-C - 54acac6f23ab
898 898 |/
899 899 o 1 public a-B - 548a3d25dbf0
900 900 |
901 901 o 0 public a-A - 054250a37db4
902 902
903 903
904 904 Discovery locally secret changeset on a remote repository:
905 905
906 906 - should make it non-secret
907 907
908 908 $ cd ../alpha
909 909 $ mkcommit A-secret --config phases.new-commit=2
910 910 test-debug-phase: new rev 11: x -> 2
911 911 $ hgph
912 912 @ 11 secret A-secret - 435b5d83910c
913 913 |
914 914 o 10 public a-H - 967b449fbc94
915 915 |
916 916 | o 9 draft a-G - 3e27b6f1eee1
917 917 | |
918 918 | o 8 public a-F - b740e3e5c05d
919 919 | |
920 920 | o 7 public a-E - e9f537e46dea
921 921 | |
922 922 +---o 6 public n-B - 145e75495359
923 923 | |
924 924 o | 5 public n-A - d6bcb4f74035
925 925 | |
926 926 o | 4 public b-A - f54f1bb90ff3
927 927 | |
928 928 | o 3 public a-D - b555f63b6063
929 929 | |
930 930 | o 2 public a-C - 54acac6f23ab
931 931 |/
932 932 o 1 public a-B - 548a3d25dbf0
933 933 |
934 934 o 0 public a-A - 054250a37db4
935 935
936 936 $ hg bundle --base 'parents(.)' -r . ../secret-bundle.hg
937 937 1 changesets found
938 938 $ hg -R ../mu unbundle ../secret-bundle.hg
939 939 adding changesets
940 940 adding manifests
941 941 adding file changes
942 942 added 1 changesets with 1 changes to 1 files
943 943 new changesets 435b5d83910c
944 944 test-debug-phase: new rev 10: x -> 1
945 945 (run 'hg update' to get a working copy)
946 946 $ hgph -R ../mu
947 947 o 10 draft A-secret - 435b5d83910c
948 948 |
949 949 o 9 public a-H - 967b449fbc94
950 950 |
951 951 | o 8 public a-F - b740e3e5c05d
952 952 | |
953 953 | o 7 public a-E - e9f537e46dea
954 954 | |
955 955 +---o 6 public n-B - 145e75495359
956 956 | |
957 957 o | 5 public n-A - d6bcb4f74035
958 958 | |
959 959 | o 4 public a-D - b555f63b6063
960 960 | |
961 961 | o 3 public a-C - 54acac6f23ab
962 962 | |
963 963 o | 2 public b-A - f54f1bb90ff3
964 964 |/
965 965 o 1 public a-B - 548a3d25dbf0
966 966 |
967 967 o 0 public a-A - 054250a37db4
968 968
969 969 $ hg pull ../mu
970 970 pulling from ../mu
971 971 searching for changes
972 972 no changes found
973 973 test-debug-phase: move rev 11: 2 -> 1
974 974 $ hgph
975 975 @ 11 draft A-secret - 435b5d83910c
976 976 |
977 977 o 10 public a-H - 967b449fbc94
978 978 |
979 979 | o 9 draft a-G - 3e27b6f1eee1
980 980 | |
981 981 | o 8 public a-F - b740e3e5c05d
982 982 | |
983 983 | o 7 public a-E - e9f537e46dea
984 984 | |
985 985 +---o 6 public n-B - 145e75495359
986 986 | |
987 987 o | 5 public n-A - d6bcb4f74035
988 988 | |
989 989 o | 4 public b-A - f54f1bb90ff3
990 990 | |
991 991 | o 3 public a-D - b555f63b6063
992 992 | |
993 993 | o 2 public a-C - 54acac6f23ab
994 994 |/
995 995 o 1 public a-B - 548a3d25dbf0
996 996 |
997 997 o 0 public a-A - 054250a37db4
998 998
999 999
1000 1000 pushing a locally public and draft changesets remotely secret should make them
1001 1001 appear on the remote side.
1002 1002
1003 1003 $ hg -R ../mu phase --secret --force 967b449fbc94
1004 1004 test-debug-phase: move rev 9: 0 -> 2
1005 1005 test-debug-phase: move rev 10: 1 -> 2
1006 1006 $ hg push -r 435b5d83910c ../mu
1007 1007 pushing to ../mu
1008 1008 searching for changes
1009 1009 abort: push creates new remote head 435b5d83910c!
1010 1010 (merge or see 'hg help push' for details about pushing new heads)
1011 1011 [255]
1012 1012 $ hg push -fr 435b5d83910c ../mu # because the push will create new visible head
1013 1013 pushing to ../mu
1014 1014 searching for changes
1015 1015 adding changesets
1016 1016 adding manifests
1017 1017 adding file changes
1018 1018 added 0 changesets with 0 changes to 2 files
1019 1019 test-debug-phase: move rev 9: 2 -> 0
1020 1020 test-debug-phase: move rev 10: 2 -> 1
1021 1021 $ hgph -R ../mu
1022 1022 o 10 draft A-secret - 435b5d83910c
1023 1023 |
1024 1024 o 9 public a-H - 967b449fbc94
1025 1025 |
1026 1026 | o 8 public a-F - b740e3e5c05d
1027 1027 | |
1028 1028 | o 7 public a-E - e9f537e46dea
1029 1029 | |
1030 1030 +---o 6 public n-B - 145e75495359
1031 1031 | |
1032 1032 o | 5 public n-A - d6bcb4f74035
1033 1033 | |
1034 1034 | o 4 public a-D - b555f63b6063
1035 1035 | |
1036 1036 | o 3 public a-C - 54acac6f23ab
1037 1037 | |
1038 1038 o | 2 public b-A - f54f1bb90ff3
1039 1039 |/
1040 1040 o 1 public a-B - 548a3d25dbf0
1041 1041 |
1042 1042 o 0 public a-A - 054250a37db4
1043 1043
1044 1044
1045 1045 pull new changeset with common draft locally
1046 1046
1047 1047 $ hg up -q 967b449fbc94 # create a new root for draft
1048 1048 $ mkcommit 'alpha-more'
1049 1049 test-debug-phase: new rev 12: x -> 1
1050 1050 created new head
1051 1051 $ hg push -fr . ../mu
1052 1052 pushing to ../mu
1053 1053 searching for changes
1054 1054 adding changesets
1055 1055 adding manifests
1056 1056 adding file changes
1057 1057 added 1 changesets with 1 changes to 1 files (+1 heads)
1058 1058 test-debug-phase: new rev 11: x -> 1
1059 1059 $ cd ../mu
1060 1060 $ hg phase --secret --force 1c5cfd894796
1061 1061 test-debug-phase: move rev 11: 1 -> 2
1062 1062 $ hg up -q 435b5d83910c
1063 1063 $ mkcommit 'mu-more'
1064 1064 test-debug-phase: new rev 12: x -> 1
1065 1065 $ cd ../alpha
1066 1066 $ hg pull ../mu
1067 1067 pulling from ../mu
1068 1068 searching for changes
1069 1069 adding changesets
1070 1070 adding manifests
1071 1071 adding file changes
1072 1072 added 1 changesets with 1 changes to 1 files
1073 1073 new changesets 5237fb433fc8
1074 1074 test-debug-phase: new rev 13: x -> 1
1075 1075 (run 'hg update' to get a working copy)
1076 1076 $ hgph
1077 1077 o 13 draft mu-more - 5237fb433fc8
1078 1078 |
1079 1079 | @ 12 draft alpha-more - 1c5cfd894796
1080 1080 | |
1081 1081 o | 11 draft A-secret - 435b5d83910c
1082 1082 |/
1083 1083 o 10 public a-H - 967b449fbc94
1084 1084 |
1085 1085 | o 9 draft a-G - 3e27b6f1eee1
1086 1086 | |
1087 1087 | o 8 public a-F - b740e3e5c05d
1088 1088 | |
1089 1089 | o 7 public a-E - e9f537e46dea
1090 1090 | |
1091 1091 +---o 6 public n-B - 145e75495359
1092 1092 | |
1093 1093 o | 5 public n-A - d6bcb4f74035
1094 1094 | |
1095 1095 o | 4 public b-A - f54f1bb90ff3
1096 1096 | |
1097 1097 | o 3 public a-D - b555f63b6063
1098 1098 | |
1099 1099 | o 2 public a-C - 54acac6f23ab
1100 1100 |/
1101 1101 o 1 public a-B - 548a3d25dbf0
1102 1102 |
1103 1103 o 0 public a-A - 054250a37db4
1104 1104
1105 1105
1106 1106 Test that secret changesets are properly ignored on remote even when existing locally
1107 1107
1108 1108 $ cd ..
1109 1109 $ hg clone -qU -r b555f63b6063 -r f54f1bb90ff3 beta gamma
1110 1110 test-debug-phase: new rev 0: x -> 0
1111 1111 test-debug-phase: new rev 1: x -> 0
1112 1112 test-debug-phase: new rev 2: x -> 0
1113 1113 test-debug-phase: new rev 3: x -> 0
1114 1114 test-debug-phase: new rev 4: x -> 0
1115 1115
1116 1116 # pathological case are
1117 1117 #
1118 1118 # * secret remotely
1119 1119 # * known locally
1120 1120 # * repo have uncommon changeset
1121 1121
1122 1122 $ hg -R beta phase --secret --force f54f1bb90ff3
1123 1123 test-debug-phase: move rev 2: 0 -> 2
1124 1124 $ hg -R gamma phase --draft --force f54f1bb90ff3
1125 1125 test-debug-phase: move rev 2: 0 -> 1
1126 1126
1127 1127 $ cd gamma
1128 1128 $ hg pull ../beta
1129 1129 pulling from ../beta
1130 1130 searching for changes
1131 1131 adding changesets
1132 1132 adding manifests
1133 1133 adding file changes
1134 1134 added 2 changesets with 2 changes to 2 files
1135 1135 new changesets e9f537e46dea:b740e3e5c05d
1136 1136 test-debug-phase: new rev 5: x -> 0
1137 1137 test-debug-phase: new rev 6: x -> 0
1138 1138 (run 'hg update' to get a working copy)
1139 1139 $ hg phase f54f1bb90ff3
1140 1140 2: draft
1141 1141
1142 1142 same over the wire
1143 1143
1144 1144 $ cd ../beta
1145 1145 $ hg serve -p $HGPORT -d --pid-file=../beta.pid -E ../beta-error.log
1146 1146 $ cat ../beta.pid >> $DAEMON_PIDS
1147 1147 $ cd ../gamma
1148 1148
1149 1149 $ hg pull http://localhost:$HGPORT/ # bundle2+
1150 1150 pulling from http://localhost:$HGPORT/
1151 1151 searching for changes
1152 1152 no changes found
1153 1153 $ hg phase f54f1bb90ff3
1154 1154 2: draft
1155 1155
1156 1156 enforce bundle1
1157 1157
1158 1158 $ hg pull http://localhost:$HGPORT/ --config devel.legacy.exchange=bundle1
1159 1159 pulling from http://localhost:$HGPORT/
1160 1160 searching for changes
1161 1161 no changes found
1162 1162 $ hg phase f54f1bb90ff3
1163 1163 2: draft
1164 1164
1165 1165 check that secret local on both side are not synced to public
1166 1166
1167 1167 $ hg push -r b555f63b6063 http://localhost:$HGPORT/
1168 1168 pushing to http://localhost:$HGPORT/
1169 1169 searching for changes
1170 1170 no changes found
1171 1171 [1]
1172 1172 $ hg phase f54f1bb90ff3
1173 1173 2: draft
1174 1174
1175 1175 put the changeset in the draft state again
1176 1176 (the first test after this one expects to be able to copy)
1177 1177
1178 1178 $ cd ..
1179 1179
1180 1180
1181 1181 Test Clone behavior
1182 1182
1183 1183 A. Clone without secret changeset
1184 1184
1185 1185 1. cloning non-publishing repository
1186 1186 (Phase should be preserved)
1187 1187
1188 1188 # make sure there is no secret so we can use a copy clone
1189 1189
1190 1190 $ hg -R mu phase --draft 'secret()'
1191 1191 test-debug-phase: move rev 11: 2 -> 1
1192 1192
1193 1193 $ hg clone -U mu Tau
1194 1194 $ hgph -R Tau
1195 1195 o 12 draft mu-more - 5237fb433fc8
1196 1196 |
1197 1197 | o 11 draft alpha-more - 1c5cfd894796
1198 1198 | |
1199 1199 o | 10 draft A-secret - 435b5d83910c
1200 1200 |/
1201 1201 o 9 public a-H - 967b449fbc94
1202 1202 |
1203 1203 | o 8 public a-F - b740e3e5c05d
1204 1204 | |
1205 1205 | o 7 public a-E - e9f537e46dea
1206 1206 | |
1207 1207 +---o 6 public n-B - 145e75495359
1208 1208 | |
1209 1209 o | 5 public n-A - d6bcb4f74035
1210 1210 | |
1211 1211 | o 4 public a-D - b555f63b6063
1212 1212 | |
1213 1213 | o 3 public a-C - 54acac6f23ab
1214 1214 | |
1215 1215 o | 2 public b-A - f54f1bb90ff3
1216 1216 |/
1217 1217 o 1 public a-B - 548a3d25dbf0
1218 1218 |
1219 1219 o 0 public a-A - 054250a37db4
1220 1220
1221 1221
1222 1222 2. cloning publishing repository
1223 1223
1224 1224 (everything should be public)
1225 1225
1226 1226 $ hg clone -U alpha Upsilon
1227 1227 $ hgph -R Upsilon
1228 1228 o 13 public mu-more - 5237fb433fc8
1229 1229 |
1230 1230 | o 12 public alpha-more - 1c5cfd894796
1231 1231 | |
1232 1232 o | 11 public A-secret - 435b5d83910c
1233 1233 |/
1234 1234 o 10 public a-H - 967b449fbc94
1235 1235 |
1236 1236 | o 9 public a-G - 3e27b6f1eee1
1237 1237 | |
1238 1238 | o 8 public a-F - b740e3e5c05d
1239 1239 | |
1240 1240 | o 7 public a-E - e9f537e46dea
1241 1241 | |
1242 1242 +---o 6 public n-B - 145e75495359
1243 1243 | |
1244 1244 o | 5 public n-A - d6bcb4f74035
1245 1245 | |
1246 1246 o | 4 public b-A - f54f1bb90ff3
1247 1247 | |
1248 1248 | o 3 public a-D - b555f63b6063
1249 1249 | |
1250 1250 | o 2 public a-C - 54acac6f23ab
1251 1251 |/
1252 1252 o 1 public a-B - 548a3d25dbf0
1253 1253 |
1254 1254 o 0 public a-A - 054250a37db4
1255 1255
1256 1256 #if unix-permissions no-root
1257 1257
1258 1258 Pushing From an unlockable repo
1259 1259 --------------------------------
1260 1260 (issue3684)
1261 1261
1262 1262 Inability to lock the source repo should not prevent the push. It will prevent
1263 1263 the retrieval of remote phase during push. For example, pushing to a publishing
1264 1264 server won't turn changeset public.
1265 1265
1266 1266 1. Test that push is not prevented
1267 1267
1268 1268 $ hg init Phi
1269 1269 $ cd Upsilon
1270 1270 $ chmod -R -w .hg
1271 1271 $ hg push ../Phi
1272 1272 pushing to ../Phi
1273 1273 searching for changes
1274 1274 adding changesets
1275 1275 adding manifests
1276 1276 adding file changes
1277 1277 added 14 changesets with 14 changes to 14 files (+3 heads)
1278 1278 test-debug-phase: new rev 0: x -> 0
1279 1279 test-debug-phase: new rev 1: x -> 0
1280 1280 test-debug-phase: new rev 2: x -> 0
1281 1281 test-debug-phase: new rev 3: x -> 0
1282 1282 test-debug-phase: new rev 4: x -> 0
1283 1283 test-debug-phase: new rev 5: x -> 0
1284 1284 test-debug-phase: new rev 6: x -> 0
1285 1285 test-debug-phase: new rev 7: x -> 0
1286 1286 test-debug-phase: new rev 8: x -> 0
1287 1287 test-debug-phase: new rev 9: x -> 0
1288 1288 test-debug-phase: new rev 10: x -> 0
1289 1289 test-debug-phase: new rev 11: x -> 0
1290 1290 test-debug-phase: new rev 12: x -> 0
1291 1291 test-debug-phase: new rev 13: x -> 0
1292 1292 $ chmod -R +w .hg
1293 1293
1294 1294 2. Test that failed phase movements are reported
1295 1295
1296 1296 $ hg phase --force --draft 3
1297 1297 test-debug-phase: move rev 3: 0 -> 1
1298 1298 test-debug-phase: move rev 7: 0 -> 1
1299 1299 test-debug-phase: move rev 8: 0 -> 1
1300 1300 test-debug-phase: move rev 9: 0 -> 1
1301 1301 $ chmod -R -w .hg
1302 1302 $ hg push ../Phi
1303 1303 pushing to ../Phi
1304 1304 searching for changes
1305 1305 no changes found
1306 1306 cannot lock source repo, skipping local public phase update
1307 1307 [1]
1308 1308 $ chmod -R +w .hg
1309 1309
1310 3. Test that push is prevented if lock was already acquired (not a permission
1311 error, but EEXIST)
1312
1313 $ touch .hg/store/lock
1314 $ hg push ../Phi --config ui.timeout=1
1315 pushing to ../Phi
1316 waiting for lock on repository $TESTTMP/Upsilon held by ''
1317 abort: repository $TESTTMP/Upsilon: timed out waiting for lock held by ''
1318 (lock might be very busy)
1319 [255]
1320 $ rm .hg/store/lock
1321
1310 1322 $ cd ..
1311 1323
1312 1324 #endif
1313 1325
1314 1326 Test that clone behaves like pull and doesn't publish changesets as plain push
1315 1327 does. The conditional output accounts for changes in the conditional block
1316 1328 above.
1317 1329
1318 1330 #if unix-permissions no-root
1319 1331 $ hg -R Upsilon phase -q --force --draft 2
1320 1332 test-debug-phase: move rev 2: 0 -> 1
1321 1333 #else
1322 1334 $ hg -R Upsilon phase -q --force --draft 2
1323 1335 test-debug-phase: move rev 2: 0 -> 1
1324 1336 test-debug-phase: move rev 3: 0 -> 1
1325 1337 test-debug-phase: move rev 7: 0 -> 1
1326 1338 test-debug-phase: move rev 8: 0 -> 1
1327 1339 test-debug-phase: move rev 9: 0 -> 1
1328 1340 #endif
1329 1341
1330 1342 $ hg clone -q Upsilon Pi -r 7
1331 1343 test-debug-phase: new rev 0: x -> 0
1332 1344 test-debug-phase: new rev 1: x -> 0
1333 1345 test-debug-phase: new rev 2: x -> 0
1334 1346 test-debug-phase: new rev 3: x -> 0
1335 1347 test-debug-phase: new rev 4: x -> 0
1336 1348 $ hgph Upsilon -r 'min(draft())'
1337 1349 o 2 draft a-C - 54acac6f23ab
1338 1350 |
1339 1351 ~
1340 1352
1341 1353 $ hg -R Upsilon push Pi -r 7
1342 1354 pushing to Pi
1343 1355 searching for changes
1344 1356 no changes found
1345 1357 test-debug-phase: move rev 2: 1 -> 0
1346 1358 test-debug-phase: move rev 3: 1 -> 0
1347 1359 test-debug-phase: move rev 7: 1 -> 0
1348 1360 [1]
1349 1361 $ hgph Upsilon -r 'min(draft())'
1350 1362 o 8 draft a-F - b740e3e5c05d
1351 1363 |
1352 1364 ~
1353 1365
1354 1366 $ hg -R Upsilon push Pi -r 8
1355 1367 pushing to Pi
1356 1368 searching for changes
1357 1369 adding changesets
1358 1370 adding manifests
1359 1371 adding file changes
1360 1372 added 1 changesets with 1 changes to 1 files
1361 1373 test-debug-phase: new rev 5: x -> 0
1362 1374 test-debug-phase: move rev 8: 1 -> 0
1363 1375
1364 1376 $ hgph Upsilon -r 'min(draft())'
1365 1377 o 9 draft a-G - 3e27b6f1eee1
1366 1378 |
1367 1379 ~
General Comments 0
You need to be logged in to leave comments. Login now