exchange: abort on pushing bookmarks pointing to secret changesets (issue6159)...
Navaneeth Suresh
r43082:3332bde5 stable
@@ -1,2701 +1,2709 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 bin,
16 16 hex,
17 17 nullid,
18 18 nullrev,
19 19 )
20 20 from .thirdparty import (
21 21 attr,
22 22 )
23 23 from . import (
24 24 bookmarks as bookmod,
25 25 bundle2,
26 26 changegroup,
27 27 discovery,
28 28 error,
29 29 exchangev2,
30 30 lock as lockmod,
31 31 logexchange,
32 32 narrowspec,
33 33 obsolete,
34 34 phases,
35 35 pushkey,
36 36 pycompat,
37 37 repository,
38 38 scmutil,
39 39 sslutil,
40 40 streamclone,
41 41 url as urlmod,
42 42 util,
43 43 wireprototypes,
44 44 )
45 45 from .utils import (
46 46 stringutil,
47 47 )
48 48
49 49 urlerr = util.urlerr
50 50 urlreq = util.urlreq
51 51
52 52 _NARROWACL_SECTION = 'narrowacl'
53 53
54 54 # Maps bundle version human names to changegroup versions.
55 55 _bundlespeccgversions = {'v1': '01',
56 56 'v2': '02',
57 57 'packed1': 's1',
58 58 'bundle2': '02', #legacy
59 59 }
60 60
61 61 # Maps bundle version with content opts to choose which part to bundle
62 62 _bundlespeccontentopts = {
63 63 'v1': {
64 64 'changegroup': True,
65 65 'cg.version': '01',
66 66 'obsolescence': False,
67 67 'phases': False,
68 68 'tagsfnodescache': False,
69 69 'revbranchcache': False
70 70 },
71 71 'v2': {
72 72 'changegroup': True,
73 73 'cg.version': '02',
74 74 'obsolescence': False,
75 75 'phases': False,
76 76 'tagsfnodescache': True,
77 77 'revbranchcache': True
78 78 },
79 79 'packed1' : {
80 80 'cg.version': 's1'
81 81 }
82 82 }
83 83 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
84 84
85 85 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
86 86 "tagsfnodescache": False,
87 87 "revbranchcache": False}}
88 88
89 89 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
90 90 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
91 91
92 92 @attr.s
93 93 class bundlespec(object):
94 94 compression = attr.ib()
95 95 wirecompression = attr.ib()
96 96 version = attr.ib()
97 97 wireversion = attr.ib()
98 98 params = attr.ib()
99 99 contentopts = attr.ib()
100 100
101 101 def parsebundlespec(repo, spec, strict=True):
102 102 """Parse a bundle string specification into parts.
103 103
104 104 Bundle specifications denote a well-defined bundle/exchange format.
105 105 The content of a given specification should not change over time in
106 106 order to ensure that bundles produced by a newer version of Mercurial are
107 107 readable from an older version.
108 108
109 109 The string currently has the form:
110 110
111 111 <compression>-<type>[;<parameter0>[;<parameter1>]]
112 112
113 113 Where <compression> is one of the supported compression formats
114 114 and <type> is (currently) a version string. A ";" can follow the type and
115 115 all text afterwards is interpreted as URI encoded, ";" delimited key=value
116 116 pairs.
117 117
118 118 If ``strict`` is True (the default) <compression> is required. Otherwise,
119 119 it is optional.
120 120
121 121 Returns a bundlespec object of (compression, version, parameters).
122 122 Compression will be ``None`` if not in strict mode and a compression isn't
123 123 defined.
124 124
125 125 An ``InvalidBundleSpecification`` is raised when the specification is
126 126 not syntactically well formed.
127 127
128 128 An ``UnsupportedBundleSpecification`` is raised when the compression or
129 129 bundle type/version is not recognized.
130 130
131 131 Note: this function will likely eventually return a more complex data
132 132 structure, including bundle2 part information.
133 133 """
134 134 def parseparams(s):
135 135 if ';' not in s:
136 136 return s, {}
137 137
138 138 params = {}
139 139 version, paramstr = s.split(';', 1)
140 140
141 141 for p in paramstr.split(';'):
142 142 if '=' not in p:
143 143 raise error.InvalidBundleSpecification(
144 144 _('invalid bundle specification: '
145 145 'missing "=" in parameter: %s') % p)
146 146
147 147 key, value = p.split('=', 1)
148 148 key = urlreq.unquote(key)
149 149 value = urlreq.unquote(value)
150 150 params[key] = value
151 151
152 152 return version, params
153 153
154 154
155 155 if strict and '-' not in spec:
156 156 raise error.InvalidBundleSpecification(
157 157 _('invalid bundle specification; '
158 158 'must be prefixed with compression: %s') % spec)
159 159
160 160 if '-' in spec:
161 161 compression, version = spec.split('-', 1)
162 162
163 163 if compression not in util.compengines.supportedbundlenames:
164 164 raise error.UnsupportedBundleSpecification(
165 165 _('%s compression is not supported') % compression)
166 166
167 167 version, params = parseparams(version)
168 168
169 169 if version not in _bundlespeccgversions:
170 170 raise error.UnsupportedBundleSpecification(
171 171 _('%s is not a recognized bundle version') % version)
172 172 else:
173 173 # Value could be just the compression or just the version, in which
174 174 # case some defaults are assumed (but only when not in strict mode).
175 175 assert not strict
176 176
177 177 spec, params = parseparams(spec)
178 178
179 179 if spec in util.compengines.supportedbundlenames:
180 180 compression = spec
181 181 version = 'v1'
182 182 # Generaldelta repos require v2.
183 183 if 'generaldelta' in repo.requirements:
184 184 version = 'v2'
185 185 # Modern compression engines require v2.
186 186 if compression not in _bundlespecv1compengines:
187 187 version = 'v2'
188 188 elif spec in _bundlespeccgversions:
189 189 if spec == 'packed1':
190 190 compression = 'none'
191 191 else:
192 192 compression = 'bzip2'
193 193 version = spec
194 194 else:
195 195 raise error.UnsupportedBundleSpecification(
196 196 _('%s is not a recognized bundle specification') % spec)
197 197
198 198 # Bundle version 1 only supports a known set of compression engines.
199 199 if version == 'v1' and compression not in _bundlespecv1compengines:
200 200 raise error.UnsupportedBundleSpecification(
201 201 _('compression engine %s is not supported on v1 bundles') %
202 202 compression)
203 203
204 204 # The specification for packed1 can optionally declare the data formats
205 205 # required to apply it. If we see this metadata, compare against what the
206 206 # repo supports and error if the bundle isn't compatible.
207 207 if version == 'packed1' and 'requirements' in params:
208 208 requirements = set(params['requirements'].split(','))
209 209 missingreqs = requirements - repo.supportedformats
210 210 if missingreqs:
211 211 raise error.UnsupportedBundleSpecification(
212 212 _('missing support for repository features: %s') %
213 213 ', '.join(sorted(missingreqs)))
214 214
215 215 # Compute contentopts based on the version
216 216 contentopts = _bundlespeccontentopts.get(version, {}).copy()
217 217
218 218 # Process the variants
219 219 if "stream" in params and params["stream"] == "v2":
220 220 variant = _bundlespecvariants["streamv2"]
221 221 contentopts.update(variant)
222 222
223 223 engine = util.compengines.forbundlename(compression)
224 224 compression, wirecompression = engine.bundletype()
225 225 wireversion = _bundlespeccgversions[version]
226 226
227 227 return bundlespec(compression, wirecompression, version, wireversion,
228 228 params, contentopts)
229 229
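For readers skimming the grammar above, here is a minimal standalone sketch (plain Python 3, not the Mercurial implementation) of how a strict-mode bundlespec string such as 'zstd-v2;obsolescence=true' decomposes into (compression, version, params):

```python
# Standalone sketch of the strict-mode bundlespec grammar described above;
# just the <compression>-<version>[;key=value...] shape, nothing more.
from urllib.parse import unquote

def parse_spec(spec):
    compression, rest = spec.split('-', 1)
    version, params = rest, {}
    if ';' in rest:
        version, paramstr = rest.split(';', 1)
        params = {unquote(k): unquote(v)
                  for k, v in (p.split('=', 1) for p in paramstr.split(';'))}
    return compression, version, params

print(parse_spec('zstd-v2;obsolescence=true'))
# -> ('zstd', 'v2', {'obsolescence': 'true'})
```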
230 230 def readbundle(ui, fh, fname, vfs=None):
231 231 header = changegroup.readexactly(fh, 4)
232 232
233 233 alg = None
234 234 if not fname:
235 235 fname = "stream"
236 236 if not header.startswith('HG') and header.startswith('\0'):
237 237 fh = changegroup.headerlessfixup(fh, header)
238 238 header = "HG10"
239 239 alg = 'UN'
240 240 elif vfs:
241 241 fname = vfs.join(fname)
242 242
243 243 magic, version = header[0:2], header[2:4]
244 244
245 245 if magic != 'HG':
246 246 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
247 247 if version == '10':
248 248 if alg is None:
249 249 alg = changegroup.readexactly(fh, 2)
250 250 return changegroup.cg1unpacker(fh, alg)
251 251 elif version.startswith('2'):
252 252 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
253 253 elif version == 'S1':
254 254 return streamclone.streamcloneapplier(fh)
255 255 else:
256 256 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
257 257
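As a quick reference for the dispatch in readbundle() above, a self-contained sketch of the four-byte header sniffing (assumes a well-formed bytes header; the real function also fixes up headerless streams):

```python
# Sketch of readbundle()'s header dispatch: the first four bytes select
# the unpacker.
def sniff(header):
    magic, version = header[:2], header[2:4]
    if magic != b'HG':
        raise ValueError('not a Mercurial bundle')
    if version == b'10':
        return 'changegroup-v1'      # 2-byte compression code follows
    if version.startswith(b'2'):
        return 'bundle2'
    if version == b'S1':
        return 'streamclone'
    raise ValueError('unknown bundle version %r' % version)

assert sniff(b'HG20') == 'bundle2'
assert sniff(b'HGS1') == 'streamclone'
```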
258 258 def getbundlespec(ui, fh):
259 259 """Infer the bundlespec from a bundle file handle.
260 260
261 261 The input file handle is seeked and the original seek position is not
262 262 restored.
263 263 """
264 264 def speccompression(alg):
265 265 try:
266 266 return util.compengines.forbundletype(alg).bundletype()[0]
267 267 except KeyError:
268 268 return None
269 269
270 270 b = readbundle(ui, fh, None)
271 271 if isinstance(b, changegroup.cg1unpacker):
272 272 alg = b._type
273 273 if alg == '_truncatedBZ':
274 274 alg = 'BZ'
275 275 comp = speccompression(alg)
276 276 if not comp:
277 277 raise error.Abort(_('unknown compression algorithm: %s') % alg)
278 278 return '%s-v1' % comp
279 279 elif isinstance(b, bundle2.unbundle20):
280 280 if 'Compression' in b.params:
281 281 comp = speccompression(b.params['Compression'])
282 282 if not comp:
283 283 raise error.Abort(_('unknown compression algorithm: %s') % comp)
284 284 else:
285 285 comp = 'none'
286 286
287 287 version = None
288 288 for part in b.iterparts():
289 289 if part.type == 'changegroup':
290 290 version = part.params['version']
291 291 if version in ('01', '02'):
292 292 version = 'v2'
293 293 else:
294 294 raise error.Abort(_('changegroup version %s does not have '
295 295 'a known bundlespec') % version,
296 296 hint=_('try upgrading your Mercurial '
297 297 'client'))
298 298 elif part.type == 'stream2' and version is None:
299 299 # A stream2 part must be part of a v2 bundle
300 300 requirements = urlreq.unquote(part.params['requirements'])
301 301 splitted = requirements.split()
302 302 params = bundle2._formatrequirementsparams(splitted)
303 303 return 'none-v2;stream=v2;%s' % params
304 304
305 305 if not version:
306 306 raise error.Abort(_('could not identify changegroup version in '
307 307 'bundle'))
308 308
309 309 return '%s-%s' % (comp, version)
310 310 elif isinstance(b, streamclone.streamcloneapplier):
311 311 requirements = streamclone.readbundle1header(fh)[2]
312 312 formatted = bundle2._formatrequirementsparams(requirements)
313 313 return 'none-packed1;%s' % formatted
314 314 else:
315 315 raise error.Abort(_('unknown bundle type: %s') % b)
316 316
317 317 def _computeoutgoing(repo, heads, common):
318 318 """Computes which revs are outgoing given a set of common
319 319 and a set of heads.
320 320
321 321 This is a separate function so extensions can have access to
322 322 the logic.
323 323
324 324 Returns a discovery.outgoing object.
325 325 """
326 326 cl = repo.changelog
327 327 if common:
328 328 hasnode = cl.hasnode
329 329 common = [n for n in common if hasnode(n)]
330 330 else:
331 331 common = [nullid]
332 332 if not heads:
333 333 heads = cl.heads()
334 334 return discovery.outgoing(repo, common, heads)
335 335
336 336 def _checkpublish(pushop):
337 337 repo = pushop.repo
338 338 ui = repo.ui
339 339 behavior = ui.config('experimental', 'auto-publish')
340 340 if pushop.publish or behavior not in ('warn', 'confirm', 'abort'):
341 341 return
342 342 remotephases = listkeys(pushop.remote, 'phases')
343 343 if not remotephases.get('publishing', False):
344 344 return
345 345
346 346 if pushop.revs is None:
347 347 published = repo.filtered('served').revs('not public()')
348 348 else:
349 349 published = repo.revs('::%ln - public()', pushop.revs)
350 350 if published:
351 351 if behavior == 'warn':
352 352 ui.warn(_('%i changesets about to be published\n')
353 353 % len(published))
354 354 elif behavior == 'confirm':
355 355 if ui.promptchoice(_('push and publish %i changesets (yn)?'
356 356 '$$ &Yes $$ &No') % len(published)):
357 357 raise error.Abort(_('user quit'))
358 358 elif behavior == 'abort':
359 359 msg = _('push would publish %i changesets') % len(published)
360 360 hint = _("use --publish or adjust 'experimental.auto-publish'"
361 361 " config")
362 362 raise error.Abort(msg, hint=hint)
363 363
364 364 def _forcebundle1(op):
365 365 """return true if a pull/push must use bundle1
366 366
367 367 This function is used to allow testing of the older bundle version"""
368 368 ui = op.repo.ui
369 369 # The goal is this config is to allow developer to choose the bundle
370 370 # version used during exchanged. This is especially handy during test.
371 371 # Value is a list of bundle version to be picked from, highest version
372 372 # should be used.
373 373 #
374 374 # developer config: devel.legacy.exchange
375 375 exchange = ui.configlist('devel', 'legacy.exchange')
376 376 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
377 377 return forcebundle1 or not op.remote.capable('bundle2')
378 378
379 379 class pushoperation(object):
380 380 """A object that represent a single push operation
381 381
382 382 Its purpose is to carry push related state and very common operations.
383 383
384 384 A new pushoperation should be created at the beginning of each push and
385 385 discarded afterward.
386 386 """
387 387
388 388 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
389 389 bookmarks=(), publish=False, pushvars=None):
390 390 # repo we push from
391 391 self.repo = repo
392 392 self.ui = repo.ui
393 393 # repo we push to
394 394 self.remote = remote
395 395 # force option provided
396 396 self.force = force
397 397 # revs to be pushed (None is "all")
398 398 self.revs = revs
399 399 # bookmarks explicitly pushed
400 400 self.bookmarks = bookmarks
401 401 # allow push of new branch
402 402 self.newbranch = newbranch
403 403 # steps already performed
404 404 # (used to check which steps have already been performed through bundle2)
405 405 self.stepsdone = set()
406 406 # Integer version of the changegroup push result
407 407 # - None means nothing to push
408 408 # - 0 means HTTP error
409 409 # - 1 means we pushed and remote head count is unchanged *or*
410 410 # we have outgoing changesets but refused to push
411 411 # - other values as described by addchangegroup()
412 412 self.cgresult = None
413 413 # Boolean value for the bookmark push
414 414 self.bkresult = None
415 415 # discovery.outgoing object (contains common and outgoing data)
416 416 self.outgoing = None
417 417 # all remote topological heads before the push
418 418 self.remoteheads = None
419 419 # Details of the remote branch pre and post push
420 420 #
421 421 # mapping: {'branch': ([remoteheads],
422 422 # [newheads],
423 423 # [unsyncedheads],
424 424 # [discardedheads])}
425 425 # - branch: the branch name
426 426 # - remoteheads: the list of remote heads known locally
427 427 # None if the branch is new
428 428 # - newheads: the new remote heads (known locally) with outgoing pushed
429 429 # - unsyncedheads: the list of remote heads unknown locally.
430 430 # - discardedheads: the list of remote heads made obsolete by the push
431 431 self.pushbranchmap = None
432 432 # testable as a boolean indicating if any nodes are missing locally.
433 433 self.incoming = None
434 434 # summary of the remote phase situation
435 435 self.remotephases = None
436 436 # phase changes that must be pushed alongside the changesets
437 437 self.outdatedphases = None
438 438 # phase changes that must be pushed if the changeset push fails
439 439 self.fallbackoutdatedphases = None
440 440 # outgoing obsmarkers
441 441 self.outobsmarkers = set()
442 442 # outgoing bookmarks
443 443 self.outbookmarks = []
444 444 # transaction manager
445 445 self.trmanager = None
446 446 # map { pushkey partid -> callback handling failure}
447 447 # used to handle exception from mandatory pushkey part failure
448 448 self.pkfailcb = {}
449 449 # an iterable of pushvars or None
450 450 self.pushvars = pushvars
451 451 # publish pushed changesets
452 452 self.publish = publish
453 453
454 454 @util.propertycache
455 455 def futureheads(self):
456 456 """future remote heads if the changeset push succeeds"""
457 457 return self.outgoing.missingheads
458 458
459 459 @util.propertycache
460 460 def fallbackheads(self):
461 461 """future remote heads if the changeset push fails"""
462 462 if self.revs is None:
463 463 # no targets to push; all common heads are relevant
464 464 return self.outgoing.commonheads
465 465 unfi = self.repo.unfiltered()
466 466 # I want cheads = heads(::missingheads and ::commonheads)
467 467 # (missingheads is revs with secret changeset filtered out)
468 468 #
469 469 # This can be expressed as:
470 470 # cheads = ( (missingheads and ::commonheads)
471 471 #          + (commonheads and ::missingheads)
472 472 #          )
473 473 #
474 474 # while trying to push we already computed the following:
475 475 # common = (::commonheads)
476 476 # missing = ((commonheads::missingheads) - commonheads)
477 477 #
478 478 # We can pick:
479 479 # * missingheads part of common (::commonheads)
480 480 common = self.outgoing.common
481 481 nm = self.repo.changelog.nodemap
482 482 cheads = [node for node in self.revs if nm[node] in common]
483 483 # and
484 484 # * commonheads parents on missing
485 485 revset = unfi.set('%ln and parents(roots(%ln))',
486 486 self.outgoing.commonheads,
487 487 self.outgoing.missing)
488 488 cheads.extend(c.node() for c in revset)
489 489 return cheads
490 490
491 491 @property
492 492 def commonheads(self):
493 493 """set of all common heads after changeset bundle push"""
494 494 if self.cgresult:
495 495 return self.futureheads
496 496 else:
497 497 return self.fallbackheads
498 498
499 499 # mapping of messages used when pushing bookmarks
500 500 bookmsgmap = {'update': (_("updating bookmark %s\n"),
501 501 _('updating bookmark %s failed!\n')),
502 502 'export': (_("exporting bookmark %s\n"),
503 503 _('exporting bookmark %s failed!\n')),
504 504 'delete': (_("deleting remote bookmark %s\n"),
505 505 _('deleting remote bookmark %s failed!\n')),
506 506 }
507 507
508 508
509 509 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
510 510 publish=False, opargs=None):
511 511 '''Push outgoing changesets (limited by revs) from a local
512 512 repository to remote. Return an integer:
513 513 - None means nothing to push
514 514 - 0 means HTTP error
515 515 - 1 means we pushed and remote head count is unchanged *or*
516 516 we have outgoing changesets but refused to push
517 517 - other values as described by addchangegroup()
518 518 '''
519 519 if opargs is None:
520 520 opargs = {}
521 521 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
522 522 publish, **pycompat.strkwargs(opargs))
523 523 if pushop.remote.local():
524 524 missing = (set(pushop.repo.requirements)
525 525 - pushop.remote.local().supported)
526 526 if missing:
527 527 msg = _("required features are not"
528 528 " supported in the destination:"
529 529 " %s") % (', '.join(sorted(missing)))
530 530 raise error.Abort(msg)
531 531
532 532 if not pushop.remote.canpush():
533 533 raise error.Abort(_("destination does not support push"))
534 534
535 535 if not pushop.remote.capable('unbundle'):
536 536 raise error.Abort(_('cannot push: destination does not support the '
537 537 'unbundle wire protocol command'))
538 538
539 539 # get lock as we might write phase data
540 540 wlock = lock = None
541 541 try:
542 542 # bundle2 push may receive a reply bundle touching bookmarks
543 543 # requiring the wlock. Take it now to ensure proper ordering.
544 544 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
545 545 if ((not _forcebundle1(pushop)) and
546 546 maypushback and
547 547 not bookmod.bookmarksinstore(repo)):
548 548 wlock = pushop.repo.wlock()
549 549 lock = pushop.repo.lock()
550 550 pushop.trmanager = transactionmanager(pushop.repo,
551 551 'push-response',
552 552 pushop.remote.url())
553 553 except error.LockUnavailable as err:
554 554 # source repo cannot be locked.
555 555 # We do not abort the push, but just disable the local phase
556 556 # synchronisation.
557 557 msg = ('cannot lock source repository: %s\n'
558 558 % stringutil.forcebytestr(err))
559 559 pushop.ui.debug(msg)
560 560
561 561 with wlock or util.nullcontextmanager():
562 562 with lock or util.nullcontextmanager():
563 563 with pushop.trmanager or util.nullcontextmanager():
564 564 pushop.repo.checkpush(pushop)
565 565 _checkpublish(pushop)
566 566 _pushdiscovery(pushop)
567 567 if not _forcebundle1(pushop):
568 568 _pushbundle2(pushop)
569 569 _pushchangeset(pushop)
570 570 _pushsyncphase(pushop)
571 571 _pushobsolete(pushop)
572 572 _pushbookmark(pushop)
573 573
574 574 if repo.ui.configbool('experimental', 'remotenames'):
575 575 logexchange.pullremotenames(repo, remote)
576 576
577 577 return pushop
578 578
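A hedged usage sketch of the push() entry point above (internal, py2-era API; the URL and bookmark name are placeholders, and a live repo object is required):

```python
# Hypothetical driver for push() above; requires an existing `repo` and a
# reachable remote. 'https://example.com/repo' and '@' are placeholders.
from mercurial import exchange, hg

def push_bookmark(repo, url, mark='@'):
    remote = hg.peer(repo.ui, {}, url)
    pushop = exchange.push(repo, remote, revs=None, bookmarks=(mark,))
    # cgresult: None = nothing to push, 0 = HTTP error,
    # 1 = pushed with remote head count unchanged (see docstring above)
    return pushop.cgresult, pushop.bkresult
```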
579 579 # list of steps to perform discovery before push
580 580 pushdiscoveryorder = []
581 581
582 582 # Mapping between step name and function
583 583 #
584 584 # This exists to help extensions wrap steps if necessary
585 585 pushdiscoverymapping = {}
586 586
587 587 def pushdiscovery(stepname):
588 588 """decorator for function performing discovery before push
589 589
590 590 The function is added to the step -> function mapping and appended to the
591 591 list of steps. Beware that decorated function will be added in order (this
592 592 may matter).
593 593
594 594 You can only use this decorator for a new step, if you want to wrap a step
595 595 from an extension, change the pushdiscovery dictionary directly."""
596 596 def dec(func):
597 597 assert stepname not in pushdiscoverymapping
598 598 pushdiscoverymapping[stepname] = func
599 599 pushdiscoveryorder.append(stepname)
600 600 return func
601 601 return dec
602 602
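Following the docstring's note about extensions, a minimal sketch of registering an additional discovery step with the decorator above ('mystep' and its debug message are hypothetical):

```python
# Hypothetical extension hook: register one more discovery step. The
# decorator appends 'mystep' to pushdiscoveryorder, so it runs after the
# built-in steps during _pushdiscovery().
@pushdiscovery('mystep')
def _pushdiscoverymystep(pushop):
    pushop.ui.debug('mystep: outgoing=%d\n' % len(pushop.outgoing.missing))
```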
603 603 def _pushdiscovery(pushop):
604 604 """Run all discovery steps"""
605 605 for stepname in pushdiscoveryorder:
606 606 step = pushdiscoverymapping[stepname]
607 607 step(pushop)
608 608
609 609 @pushdiscovery('changeset')
610 610 def _pushdiscoverychangeset(pushop):
611 611 """discover the changeset that need to be pushed"""
612 612 fci = discovery.findcommonincoming
613 613 if pushop.revs:
614 614 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
615 615 ancestorsof=pushop.revs)
616 616 else:
617 617 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
618 618 common, inc, remoteheads = commoninc
619 619 fco = discovery.findcommonoutgoing
620 620 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
621 621 commoninc=commoninc, force=pushop.force)
622 622 pushop.outgoing = outgoing
623 623 pushop.remoteheads = remoteheads
624 624 pushop.incoming = inc
625 625
626 626 @pushdiscovery('phase')
627 627 def _pushdiscoveryphase(pushop):
628 628 """discover the phase that needs to be pushed
629 629
630 630 (computed for both success and failure case for changesets push)"""
631 631 outgoing = pushop.outgoing
632 632 unfi = pushop.repo.unfiltered()
633 633 remotephases = listkeys(pushop.remote, 'phases')
634 634
635 635 if (pushop.ui.configbool('ui', '_usedassubrepo')
636 636 and remotephases # server supports phases
637 637 and not pushop.outgoing.missing # no changesets to be pushed
638 638 and remotephases.get('publishing', False)):
639 639 # When:
640 640 # - this is a subrepo push
641 641 # - and the remote supports phases
642 642 # - and no changesets are to be pushed
643 643 # - and the remote is publishing
644 644 # We may be in the issue 3871 case!
645 645 # We drop the courtesy phase synchronisation that would
646 646 # otherwise publish changesets that are possibly still draft
647 647 # on the remote.
648 648 pushop.outdatedphases = []
649 649 pushop.fallbackoutdatedphases = []
650 650 return
651 651
652 652 pushop.remotephases = phases.remotephasessummary(pushop.repo,
653 653 pushop.fallbackheads,
654 654 remotephases)
655 655 droots = pushop.remotephases.draftroots
656 656
657 657 extracond = ''
658 658 if not pushop.remotephases.publishing:
659 659 extracond = ' and public()'
660 660 revset = 'heads((%%ln::%%ln) %s)' % extracond
661 661 # Get the list of all revs that are draft on the remote but public here.
662 662 # XXX Beware that the revset breaks if droots are not strictly
663 663 # XXX roots; we may want to ensure they are, but that is costly
664 664 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
665 665 if not pushop.remotephases.publishing and pushop.publish:
666 666 future = list(unfi.set('%ln and (not public() or %ln::)',
667 667 pushop.futureheads, droots))
668 668 elif not outgoing.missing:
669 669 future = fallback
670 670 else:
671 671 # add the changesets we are going to push as draft
672 672 #
673 673 # should not be necessary for a publishing server, but because of an
674 674 # issue fixed in xxxxx we have to do it anyway.
675 675 fdroots = list(unfi.set('roots(%ln + %ln::)',
676 676 outgoing.missing, droots))
677 677 fdroots = [f.node() for f in fdroots]
678 678 future = list(unfi.set(revset, fdroots, pushop.futureheads))
679 679 pushop.outdatedphases = future
680 680 pushop.fallbackoutdatedphases = fallback
681 681
682 682 @pushdiscovery('obsmarker')
683 683 def _pushdiscoveryobsmarkers(pushop):
684 684 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
685 685 return
686 686
687 687 if not pushop.repo.obsstore:
688 688 return
689 689
690 690 if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
691 691 return
692 692
693 693 repo = pushop.repo
694 694 # very naive computation that can be quite expensive on a big repo;
695 695 # however, evolution is currently slow on big repos anyway.
696 696 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
697 697 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
698 698
699 699 @pushdiscovery('bookmarks')
700 700 def _pushdiscoverybookmarks(pushop):
701 701 ui = pushop.ui
702 702 repo = pushop.repo.unfiltered()
703 703 remote = pushop.remote
704 704 ui.debug("checking for updated bookmarks\n")
705 705 ancestors = ()
706 706 if pushop.revs:
707 707 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
708 708 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
709 709
710 710 remotebookmark = listkeys(remote, 'bookmarks')
711 711
712 712 explicit = {repo._bookmarks.expandname(bookmark)
713 713 for bookmark in pushop.bookmarks}
714 714
715 715 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
716 716 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
717 717
718 718 def safehex(x):
719 719 if x is None:
720 720 return x
721 721 return hex(x)
722 722
723 723 def hexifycompbookmarks(bookmarks):
724 724 return [(b, safehex(scid), safehex(dcid))
725 725 for (b, scid, dcid) in bookmarks]
726 726
727 727 comp = [hexifycompbookmarks(marks) for marks in comp]
728 728 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
729 729
730 730 def _processcompared(pushop, pushed, explicit, remotebms, comp):
731 731 """take decision on bookmark to pull from the remote bookmark
732 732
733 733 Exist to help extensions who want to alter this behavior.
734 734 """
735 735 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
736 736
737 737 repo = pushop.repo
738 738
739 739 for b, scid, dcid in advsrc:
740 740 if b in explicit:
741 741 explicit.remove(b)
742 742 if not pushed or repo[scid].rev() in pushed:
743 743 pushop.outbookmarks.append((b, dcid, scid))
744 744 # search for added bookmarks
745 745 for b, scid, dcid in addsrc:
746 746 if b in explicit:
747 747 explicit.remove(b)
748 748 pushop.outbookmarks.append((b, '', scid))
749 749 # search for overwritten bookmark
750 750 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
751 751 if b in explicit:
752 752 explicit.remove(b)
753 753 pushop.outbookmarks.append((b, dcid, scid))
754 754 # search for bookmark to delete
755 755 for b, scid, dcid in adddst:
756 756 if b in explicit:
757 757 explicit.remove(b)
758 758 # treat as "deleted locally"
759 759 pushop.outbookmarks.append((b, dcid, ''))
760 760 # identical bookmarks shouldn't get reported
761 761 for b, scid, dcid in same:
762 762 if b in explicit:
763 763 explicit.remove(b)
764 764
765 765 if explicit:
766 766 explicit = sorted(explicit)
767 767 # we should probably list all of them
768 768 pushop.ui.warn(_('bookmark %s does not exist on the local '
769 769 'or remote repository!\n') % explicit[0])
770 770 pushop.bkresult = 2
771 771
772 772 pushop.outbookmarks.sort()
773 773
774 774 def _pushcheckoutgoing(pushop):
775 775 outgoing = pushop.outgoing
776 776 unfi = pushop.repo.unfiltered()
777 777 if not outgoing.missing:
778 778 # nothing to push
779 779 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
780 780 return False
781 781 # something to push
782 782 if not pushop.force:
783 783 # if repo.obsstore is falsy --> no obsolete markers,
784 784 # so we can skip the iteration
785 785 if unfi.obsstore:
786 786 # these messages are defined here for 80-char-limit reasons
787 787 mso = _("push includes obsolete changeset: %s!")
788 788 mspd = _("push includes phase-divergent changeset: %s!")
789 789 mscd = _("push includes content-divergent changeset: %s!")
790 790 mst = {"orphan": _("push includes orphan changeset: %s!"),
791 791 "phase-divergent": mspd,
792 792 "content-divergent": mscd}
793 793 # If there is at least one obsolete or unstable
794 794 # changeset in missing, at least one of the missing
795 795 # heads will be obsolete or unstable. So checking
796 796 # heads only is ok.
797 797 for node in outgoing.missingheads:
798 798 ctx = unfi[node]
799 799 if ctx.obsolete():
800 800 raise error.Abort(mso % ctx)
801 801 elif ctx.isunstable():
802 802 # TODO print more than one instability in the abort
803 803 # message
804 804 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
805 805
806 806 discovery.checkheads(pushop)
807 807 return True
808 808
809 809 # List of names of steps to perform for an outgoing bundle2, order matters.
810 810 b2partsgenorder = []
811 811
812 812 # Mapping between step name and function
813 813 #
814 814 # This exists to help extensions wrap steps if necessary
815 815 b2partsgenmapping = {}
816 816
817 817 def b2partsgenerator(stepname, idx=None):
818 818 """decorator for function generating bundle2 part
819 819
820 820 The function is added to the step -> function mapping and appended to the
821 821 list of steps. Beware that decorated functions will be added in order
822 822 (this may matter).
823 823
824 824 You can only use this decorator for new steps; if you want to wrap a step
825 825 from an extension, modify the b2partsgenmapping dictionary directly."""
826 826 def dec(func):
827 827 assert stepname not in b2partsgenmapping
828 828 b2partsgenmapping[stepname] = func
829 829 if idx is None:
830 830 b2partsgenorder.append(stepname)
831 831 else:
832 832 b2partsgenorder.insert(idx, stepname)
833 833 return func
834 834 return dec
835 835
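Likewise for bundle2 parts, a sketch of how an extension might register its own generator ('myext' step name and 'x-myext' part type are hypothetical; mandatory=False keeps servers that do not know the part from aborting):

```python
# Hypothetical bundle2 part generator registered with the decorator above.
@b2partsgenerator('myext')
def _pushb2myext(pushop, bundler):
    if 'myext' in pushop.stepsdone:
        return
    pushop.stepsdone.add('myext')
    bundler.newpart('x-myext', data=b'payload', mandatory=False)
```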
836 836 def _pushb2ctxcheckheads(pushop, bundler):
837 837 """Generate race condition checking parts
838 838
839 839 Exists as an independent function to aid extensions
840 840 """
841 841 # * 'force' does not check for push races,
842 842 # * if we don't push anything, there is nothing to check.
843 843 if not pushop.force and pushop.outgoing.missingheads:
844 844 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
845 845 emptyremote = pushop.pushbranchmap is None
846 846 if not allowunrelated or emptyremote:
847 847 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
848 848 else:
849 849 affected = set()
850 850 for branch, heads in pushop.pushbranchmap.iteritems():
851 851 remoteheads, newheads, unsyncedheads, discardedheads = heads
852 852 if remoteheads is not None:
853 853 remote = set(remoteheads)
854 854 affected |= set(discardedheads) & remote
855 855 affected |= remote - set(newheads)
856 856 if affected:
857 857 data = iter(sorted(affected))
858 858 bundler.newpart('check:updated-heads', data=data)
859 859
860 860 def _pushing(pushop):
861 861 """return True if we are pushing anything"""
862 862 return bool(pushop.outgoing.missing
863 863 or pushop.outdatedphases
864 864 or pushop.outobsmarkers
865 865 or pushop.outbookmarks)
866 866
867 867 @b2partsgenerator('check-bookmarks')
868 868 def _pushb2checkbookmarks(pushop, bundler):
869 869 """insert bookmark move checking"""
870 870 if not _pushing(pushop) or pushop.force:
871 871 return
872 872 b2caps = bundle2.bundle2caps(pushop.remote)
873 873 hasbookmarkcheck = 'bookmarks' in b2caps
874 874 if not (pushop.outbookmarks and hasbookmarkcheck):
875 875 return
876 876 data = []
877 877 for book, old, new in pushop.outbookmarks:
878 878 old = bin(old)
879 879 data.append((book, old))
880 880 checkdata = bookmod.binaryencode(data)
881 881 bundler.newpart('check:bookmarks', data=checkdata)
882 882
883 883 @b2partsgenerator('check-phases')
884 884 def _pushb2checkphases(pushop, bundler):
885 885 """insert phase move checking"""
886 886 if not _pushing(pushop) or pushop.force:
887 887 return
888 888 b2caps = bundle2.bundle2caps(pushop.remote)
889 889 hasphaseheads = 'heads' in b2caps.get('phases', ())
890 890 if pushop.remotephases is not None and hasphaseheads:
891 891 # check that the remote phase has not changed
892 892 checks = [[] for p in phases.allphases]
893 893 checks[phases.public].extend(pushop.remotephases.publicheads)
894 894 checks[phases.draft].extend(pushop.remotephases.draftroots)
895 895 if any(checks):
896 896 for nodes in checks:
897 897 nodes.sort()
898 898 checkdata = phases.binaryencode(checks)
899 899 bundler.newpart('check:phases', data=checkdata)
900 900
901 901 @b2partsgenerator('changeset')
902 902 def _pushb2ctx(pushop, bundler):
903 903 """handle changegroup push through bundle2
904 904
905 905 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
906 906 """
907 907 if 'changesets' in pushop.stepsdone:
908 908 return
909 909 pushop.stepsdone.add('changesets')
910 910 # Send known heads to the server for race detection.
911 911 if not _pushcheckoutgoing(pushop):
912 912 return
913 913 pushop.repo.prepushoutgoinghooks(pushop)
914 914
915 915 _pushb2ctxcheckheads(pushop, bundler)
916 916
917 917 b2caps = bundle2.bundle2caps(pushop.remote)
918 918 version = '01'
919 919 cgversions = b2caps.get('changegroup')
920 920 if cgversions: # 3.1 and 3.2 ship with an empty value
921 921 cgversions = [v for v in cgversions
922 922 if v in changegroup.supportedoutgoingversions(
923 923 pushop.repo)]
924 924 if not cgversions:
925 925 raise error.Abort(_('no common changegroup version'))
926 926 version = max(cgversions)
927 927 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
928 928 'push')
929 929 cgpart = bundler.newpart('changegroup', data=cgstream)
930 930 if cgversions:
931 931 cgpart.addparam('version', version)
932 932 if 'treemanifest' in pushop.repo.requirements:
933 933 cgpart.addparam('treemanifest', '1')
934 934 def handlereply(op):
935 935 """extract addchangegroup returns from server reply"""
936 936 cgreplies = op.records.getreplies(cgpart.id)
937 937 assert len(cgreplies['changegroup']) == 1
938 938 pushop.cgresult = cgreplies['changegroup'][0]['return']
939 939 return handlereply
940 940
941 941 @b2partsgenerator('phase')
942 942 def _pushb2phases(pushop, bundler):
943 943 """handle phase push through bundle2"""
944 944 if 'phases' in pushop.stepsdone:
945 945 return
946 946 b2caps = bundle2.bundle2caps(pushop.remote)
947 947 ui = pushop.repo.ui
948 948
949 949 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
950 950 haspushkey = 'pushkey' in b2caps
951 951 hasphaseheads = 'heads' in b2caps.get('phases', ())
952 952
953 953 if hasphaseheads and not legacyphase:
954 954 return _pushb2phaseheads(pushop, bundler)
955 955 elif haspushkey:
956 956 return _pushb2phasespushkey(pushop, bundler)
957 957
958 958 def _pushb2phaseheads(pushop, bundler):
959 959 """push phase information through a bundle2 - binary part"""
960 960 pushop.stepsdone.add('phases')
961 961 if pushop.outdatedphases:
962 962 updates = [[] for p in phases.allphases]
963 963 updates[0].extend(h.node() for h in pushop.outdatedphases)
964 964 phasedata = phases.binaryencode(updates)
965 965 bundler.newpart('phase-heads', data=phasedata)
966 966
967 967 def _pushb2phasespushkey(pushop, bundler):
968 968 """push phase information through a bundle2 - pushkey part"""
969 969 pushop.stepsdone.add('phases')
970 970 part2node = []
971 971
972 972 def handlefailure(pushop, exc):
973 973 targetid = int(exc.partid)
974 974 for partid, node in part2node:
975 975 if partid == targetid:
976 976 raise error.Abort(_('updating %s to public failed') % node)
977 977
978 978 enc = pushkey.encode
979 979 for newremotehead in pushop.outdatedphases:
980 980 part = bundler.newpart('pushkey')
981 981 part.addparam('namespace', enc('phases'))
982 982 part.addparam('key', enc(newremotehead.hex()))
983 983 part.addparam('old', enc('%d' % phases.draft))
984 984 part.addparam('new', enc('%d' % phases.public))
985 985 part2node.append((part.id, newremotehead))
986 986 pushop.pkfailcb[part.id] = handlefailure
987 987
988 988 def handlereply(op):
989 989 for partid, node in part2node:
990 990 partrep = op.records.getreplies(partid)
991 991 results = partrep['pushkey']
992 992 assert len(results) <= 1
993 993 msg = None
994 994 if not results:
995 995 msg = _('server ignored update of %s to public!\n') % node
996 996 elif not int(results[0]['return']):
997 997 msg = _('updating %s to public failed!\n') % node
998 998 if msg is not None:
999 999 pushop.ui.warn(msg)
1000 1000 return handlereply
1001 1001
1002 1002 @b2partsgenerator('obsmarkers')
1003 1003 def _pushb2obsmarkers(pushop, bundler):
1004 1004 if 'obsmarkers' in pushop.stepsdone:
1005 1005 return
1006 1006 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1007 1007 if obsolete.commonversion(remoteversions) is None:
1008 1008 return
1009 1009 pushop.stepsdone.add('obsmarkers')
1010 1010 if pushop.outobsmarkers:
1011 1011 markers = sorted(pushop.outobsmarkers)
1012 1012 bundle2.buildobsmarkerspart(bundler, markers)
1013 1013
1014 1014 @b2partsgenerator('bookmarks')
1015 1015 def _pushb2bookmarks(pushop, bundler):
1016 1016 """handle bookmark push through bundle2"""
1017 1017 if 'bookmarks' in pushop.stepsdone:
1018 1018 return
1019 1019 b2caps = bundle2.bundle2caps(pushop.remote)
1020 1020
1021 1021 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
1022 1022 legacybooks = 'bookmarks' in legacy
1023 1023
1024 1024 if not legacybooks and 'bookmarks' in b2caps:
1025 1025 return _pushb2bookmarkspart(pushop, bundler)
1026 1026 elif 'pushkey' in b2caps:
1027 1027 return _pushb2bookmarkspushkey(pushop, bundler)
1028 1028
1029 1029 def _bmaction(old, new):
1030 1030 """small utility for bookmark pushing"""
1031 1031 if not old:
1032 1032 return 'export'
1033 1033 elif not new:
1034 1034 return 'delete'
1035 1035 return 'update'
1036 1036
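_bmaction() above maps old/new node pairs to the three bookmark actions; only the truthiness of the arguments matters (an empty string stands for an absent side):

```python
# The three bookmark actions, by example.
assert _bmaction('', 'abc123') == 'export'        # bookmark is new
assert _bmaction('abc123', '') == 'delete'        # bookmark removed
assert _bmaction('abc123', 'def456') == 'update'  # bookmark moved
```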
1037 def _abortonsecretctx(pushop, node, b):
1038 """abort if a given bookmark points to a secret changeset"""
1039 if node and pushop.repo[node].phase() == phases.secret:
1040 raise error.Abort(_('cannot push bookmark %s as it points to a secret'
1041 ' changeset') % b)
1042
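This guard is the heart of the change for issue6159: both the bundle2 bookmarks part and the pushkey fallback below now call it before encoding a bookmark update. A self-contained simulation (phases.secret is 2 in Mercurial; the toy phase mapping is hypothetical):

```python
# Toy simulation of the guard above: a bookmark whose new target is a
# secret changeset aborts the push; deletions (node == '') pass through.
SECRET = 2  # phases.secret

class Abort(Exception):
    pass

def abort_on_secret(phase_by_node, node, book):
    if node and phase_by_node.get(node) == SECRET:
        raise Abort('cannot push bookmark %s as it points to a secret '
                    'changeset' % book)

phase_by_node = {'c0ffee': SECRET}
abort_on_secret(phase_by_node, '', 'gone')   # deletion: no abort
try:
    abort_on_secret(phase_by_node, 'c0ffee', 'book')
except Abort as e:
    print(e)  # cannot push bookmark book as it points to a secret changeset
```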
1037 1043 def _pushb2bookmarkspart(pushop, bundler):
1038 1044 pushop.stepsdone.add('bookmarks')
1039 1045 if not pushop.outbookmarks:
1040 1046 return
1041 1047
1042 1048 allactions = []
1043 1049 data = []
1044 1050 for book, old, new in pushop.outbookmarks:
1051 _abortonsecretctx(pushop, new, book)
1045 1052 new = bin(new)
1046 1053 data.append((book, new))
1047 1054 allactions.append((book, _bmaction(old, new)))
1048 1055 checkdata = bookmod.binaryencode(data)
1049 1056 bundler.newpart('bookmarks', data=checkdata)
1050 1057
1051 1058 def handlereply(op):
1052 1059 ui = pushop.ui
1053 1060 # if success
1054 1061 for book, action in allactions:
1055 1062 ui.status(bookmsgmap[action][0] % book)
1056 1063
1057 1064 return handlereply
1058 1065
1059 1066 def _pushb2bookmarkspushkey(pushop, bundler):
1060 1067 pushop.stepsdone.add('bookmarks')
1061 1068 part2book = []
1062 1069 enc = pushkey.encode
1063 1070
1064 1071 def handlefailure(pushop, exc):
1065 1072 targetid = int(exc.partid)
1066 1073 for partid, book, action in part2book:
1067 1074 if partid == targetid:
1068 1075 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1069 1076 # we should not be called for parts we did not generate
1070 1077 assert False
1071 1078
1072 1079 for book, old, new in pushop.outbookmarks:
1080 _abortonsecretctx(pushop, new, book)
1073 1081 part = bundler.newpart('pushkey')
1074 1082 part.addparam('namespace', enc('bookmarks'))
1075 1083 part.addparam('key', enc(book))
1076 1084 part.addparam('old', enc(old))
1077 1085 part.addparam('new', enc(new))
1078 1086 action = 'update'
1079 1087 if not old:
1080 1088 action = 'export'
1081 1089 elif not new:
1082 1090 action = 'delete'
1083 1091 part2book.append((part.id, book, action))
1084 1092 pushop.pkfailcb[part.id] = handlefailure
1085 1093
1086 1094 def handlereply(op):
1087 1095 ui = pushop.ui
1088 1096 for partid, book, action in part2book:
1089 1097 partrep = op.records.getreplies(partid)
1090 1098 results = partrep['pushkey']
1091 1099 assert len(results) <= 1
1092 1100 if not results:
1093 1101 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
1094 1102 else:
1095 1103 ret = int(results[0]['return'])
1096 1104 if ret:
1097 1105 ui.status(bookmsgmap[action][0] % book)
1098 1106 else:
1099 1107 ui.warn(bookmsgmap[action][1] % book)
1100 1108 if pushop.bkresult is not None:
1101 1109 pushop.bkresult = 1
1102 1110 return handlereply
1103 1111
1104 1112 @b2partsgenerator('pushvars', idx=0)
1105 1113 def _getbundlesendvars(pushop, bundler):
1106 1114 '''send shellvars via bundle2'''
1107 1115 pushvars = pushop.pushvars
1108 1116 if pushvars:
1109 1117 shellvars = {}
1110 1118 for raw in pushvars:
1111 1119 if '=' not in raw:
1112 1120 msg = ("unable to parse variable '%s', should follow "
1113 1121 "'KEY=VALUE' or 'KEY=' format")
1114 1122 raise error.Abort(msg % raw)
1115 1123 k, v = raw.split('=', 1)
1116 1124 shellvars[k] = v
1117 1125
1118 1126 part = bundler.newpart('pushvars')
1119 1127
1120 1128 for key, value in shellvars.iteritems():
1121 1129 part.addparam(key, value, mandatory=False)
1122 1130
1123 1131 def _pushbundle2(pushop):
1124 1132 """push data to the remote using bundle2
1125 1133
1126 1134 The only currently supported type of data is changegroup but this will
1127 1135 evolve in the future."""
1128 1136 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1129 1137 pushback = (pushop.trmanager
1130 1138 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1131 1139
1132 1140 # create reply capability
1133 1141 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1134 1142 allowpushback=pushback,
1135 1143 role='client'))
1136 1144 bundler.newpart('replycaps', data=capsblob)
1137 1145 replyhandlers = []
1138 1146 for partgenname in b2partsgenorder:
1139 1147 partgen = b2partsgenmapping[partgenname]
1140 1148 ret = partgen(pushop, bundler)
1141 1149 if callable(ret):
1142 1150 replyhandlers.append(ret)
1143 1151 # do not push if nothing to push
1144 1152 if bundler.nbparts <= 1:
1145 1153 return
1146 1154 stream = util.chunkbuffer(bundler.getchunks())
1147 1155 try:
1148 1156 try:
1149 1157 with pushop.remote.commandexecutor() as e:
1150 1158 reply = e.callcommand('unbundle', {
1151 1159 'bundle': stream,
1152 1160 'heads': ['force'],
1153 1161 'url': pushop.remote.url(),
1154 1162 }).result()
1155 1163 except error.BundleValueError as exc:
1156 1164 raise error.Abort(_('missing support for %s') % exc)
1157 1165 try:
1158 1166 trgetter = None
1159 1167 if pushback:
1160 1168 trgetter = pushop.trmanager.transaction
1161 1169 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1162 1170 except error.BundleValueError as exc:
1163 1171 raise error.Abort(_('missing support for %s') % exc)
1164 1172 except bundle2.AbortFromPart as exc:
1165 1173 pushop.ui.status(_('remote: %s\n') % exc)
1166 1174 if exc.hint is not None:
1167 1175 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1168 1176 raise error.Abort(_('push failed on remote'))
1169 1177 except error.PushkeyFailed as exc:
1170 1178 partid = int(exc.partid)
1171 1179 if partid not in pushop.pkfailcb:
1172 1180 raise
1173 1181 pushop.pkfailcb[partid](pushop, exc)
1174 1182 for rephand in replyhandlers:
1175 1183 rephand(op)
1176 1184
1177 1185 def _pushchangeset(pushop):
1178 1186 """Make the actual push of changeset bundle to remote repo"""
1179 1187 if 'changesets' in pushop.stepsdone:
1180 1188 return
1181 1189 pushop.stepsdone.add('changesets')
1182 1190 if not _pushcheckoutgoing(pushop):
1183 1191 return
1184 1192
1185 1193 # Should have verified this in push().
1186 1194 assert pushop.remote.capable('unbundle')
1187 1195
1188 1196 pushop.repo.prepushoutgoinghooks(pushop)
1189 1197 outgoing = pushop.outgoing
1190 1198 # TODO: get bundlecaps from remote
1191 1199 bundlecaps = None
1192 1200 # create a changegroup from local
1193 1201 if pushop.revs is None and not (outgoing.excluded
1194 1202 or pushop.repo.changelog.filteredrevs):
1195 1203 # push everything,
1196 1204 # use the fast path, no race possible on push
1197 1205 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1198 1206 fastpath=True, bundlecaps=bundlecaps)
1199 1207 else:
1200 1208 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1201 1209 'push', bundlecaps=bundlecaps)
1202 1210
1203 1211 # apply changegroup to remote
1204 1212 # local repo finds heads on server, finds out what
1205 1213 # revs it must push. once revs transferred, if server
1206 1214 # finds it has different heads (someone else won
1207 1215 # commit/push race), server aborts.
1208 1216 if pushop.force:
1209 1217 remoteheads = ['force']
1210 1218 else:
1211 1219 remoteheads = pushop.remoteheads
1212 1220 # ssh: return remote's addchangegroup()
1213 1221 # http: return remote's addchangegroup() or 0 for error
1214 1222 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1215 1223 pushop.repo.url())
1216 1224
1217 1225 def _pushsyncphase(pushop):
1218 1226 """synchronise phase information locally and remotely"""
1219 1227 cheads = pushop.commonheads
1220 1228 # even when we don't push, exchanging phase data is useful
1221 1229 remotephases = listkeys(pushop.remote, 'phases')
1222 1230 if (pushop.ui.configbool('ui', '_usedassubrepo')
1223 1231 and remotephases # server supports phases
1224 1232 and pushop.cgresult is None # nothing was pushed
1225 1233 and remotephases.get('publishing', False)):
1226 1234 # When:
1227 1235 # - this is a subrepo push
1228 1236 # - and the remote supports phases
1229 1237 # - and no changeset was pushed
1230 1238 # - and the remote is publishing
1231 1239 # We may be in the issue 3871 case!
1232 1240 # We drop the courtesy phase synchronisation that would
1233 1241 # otherwise publish changesets that are possibly still draft
1234 1242 # on the remote.
1235 1243 remotephases = {'publishing': 'True'}
1236 1244 if not remotephases: # old server or public only reply from non-publishing
1237 1245 _localphasemove(pushop, cheads)
1238 1246 # don't push any phase data as there is nothing to push
1239 1247 else:
1240 1248 ana = phases.analyzeremotephases(pushop.repo, cheads,
1241 1249 remotephases)
1242 1250 pheads, droots = ana
1243 1251 ### Apply remote phase on local
1244 1252 if remotephases.get('publishing', False):
1245 1253 _localphasemove(pushop, cheads)
1246 1254 else: # publish = False
1247 1255 _localphasemove(pushop, pheads)
1248 1256 _localphasemove(pushop, cheads, phases.draft)
1249 1257 ### Apply local phase on remote
1250 1258
1251 1259 if pushop.cgresult:
1252 1260 if 'phases' in pushop.stepsdone:
1253 1261 # phases already pushed through bundle2
1254 1262 return
1255 1263 outdated = pushop.outdatedphases
1256 1264 else:
1257 1265 outdated = pushop.fallbackoutdatedphases
1258 1266
1259 1267 pushop.stepsdone.add('phases')
1260 1268
1261 1269 # filter heads already turned public by the push
1262 1270 outdated = [c for c in outdated if c.node() not in pheads]
1263 1271 # fallback to independent pushkey command
1264 1272 for newremotehead in outdated:
1265 1273 with pushop.remote.commandexecutor() as e:
1266 1274 r = e.callcommand('pushkey', {
1267 1275 'namespace': 'phases',
1268 1276 'key': newremotehead.hex(),
1269 1277 'old': '%d' % phases.draft,
1270 1278 'new': '%d' % phases.public
1271 1279 }).result()
1272 1280
1273 1281 if not r:
1274 1282 pushop.ui.warn(_('updating %s to public failed!\n')
1275 1283 % newremotehead)
1276 1284
1277 1285 def _localphasemove(pushop, nodes, phase=phases.public):
1278 1286 """move <nodes> to <phase> in the local source repo"""
1279 1287 if pushop.trmanager:
1280 1288 phases.advanceboundary(pushop.repo,
1281 1289 pushop.trmanager.transaction(),
1282 1290 phase,
1283 1291 nodes)
1284 1292 else:
1285 1293 # repo is not locked, do not change any phases!
1286 1294 # Informs the user that phases should have been moved when
1287 1295 # applicable.
1288 1296 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1289 1297 phasestr = phases.phasenames[phase]
1290 1298 if actualmoves:
1291 1299 pushop.ui.status(_('cannot lock source repo, skipping '
1292 1300 'local %s phase update\n') % phasestr)
1293 1301
1294 1302 def _pushobsolete(pushop):
1295 1303 """utility function to push obsolete markers to a remote"""
1296 1304 if 'obsmarkers' in pushop.stepsdone:
1297 1305 return
1298 1306 repo = pushop.repo
1299 1307 remote = pushop.remote
1300 1308 pushop.stepsdone.add('obsmarkers')
1301 1309 if pushop.outobsmarkers:
1302 1310 pushop.ui.debug('try to push obsolete markers to remote\n')
1303 1311 rslts = []
1304 1312 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1305 1313 for key in sorted(remotedata, reverse=True):
1306 1314 # reverse sort to ensure we end with dump0
1307 1315 data = remotedata[key]
1308 1316 rslts.append(remote.pushkey('obsolete', key, '', data))
1309 1317 if [r for r in rslts if not r]:
1310 1318 msg = _('failed to push some obsolete markers!\n')
1311 1319 repo.ui.warn(msg)
1312 1320
1313 1321 def _pushbookmark(pushop):
1314 1322 """Update bookmark position on remote"""
1315 1323 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1316 1324 return
1317 1325 pushop.stepsdone.add('bookmarks')
1318 1326 ui = pushop.ui
1319 1327 remote = pushop.remote
1320 1328
1321 1329 for b, old, new in pushop.outbookmarks:
1322 1330 action = 'update'
1323 1331 if not old:
1324 1332 action = 'export'
1325 1333 elif not new:
1326 1334 action = 'delete'
1327 1335
1328 1336 with remote.commandexecutor() as e:
1329 1337 r = e.callcommand('pushkey', {
1330 1338 'namespace': 'bookmarks',
1331 1339 'key': b,
1332 1340 'old': old,
1333 1341 'new': new,
1334 1342 }).result()
1335 1343
1336 1344 if r:
1337 1345 ui.status(bookmsgmap[action][0] % b)
1338 1346 else:
1339 1347 ui.warn(bookmsgmap[action][1] % b)
1340 1348 # discovery can have set the value from an invalid entry
1341 1349 if pushop.bkresult is not None:
1342 1350 pushop.bkresult = 1
1343 1351
1344 1352 class pulloperation(object):
1345 1353 """A object that represent a single pull operation
1346 1354
1347 1355 It purpose is to carry pull related state and very common operation.
1348 1356
1349 1357 A new should be created at the beginning of each pull and discarded
1350 1358 afterward.
1351 1359 """
1352 1360
1353 1361 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1354 1362 remotebookmarks=None, streamclonerequested=None,
1355 1363 includepats=None, excludepats=None, depth=None):
1356 1364 # repo we pull into
1357 1365 self.repo = repo
1358 1366 # repo we pull from
1359 1367 self.remote = remote
1360 1368 # revisions we try to pull (None is "all")
1361 1369 self.heads = heads
1362 1370 # bookmarks pulled explicitly
1363 1371 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1364 1372 for bookmark in bookmarks]
1365 1373 # do we force pull?
1366 1374 self.force = force
1367 1375 # whether a streaming clone was requested
1368 1376 self.streamclonerequested = streamclonerequested
1369 1377 # transaction manager
1370 1378 self.trmanager = None
1371 1379 # set of common changesets between local and remote before pull
1372 1380 self.common = None
1373 1381 # set of pulled heads
1374 1382 self.rheads = None
1375 1383 # list of missing changesets to fetch remotely
1376 1384 self.fetch = None
1377 1385 # remote bookmarks data
1378 1386 self.remotebookmarks = remotebookmarks
1379 1387 # result of changegroup pulling (used as return code by pull)
1380 1388 self.cgresult = None
1381 1389 # list of steps already done
1382 1390 self.stepsdone = set()
1383 1391 # Whether we attempted a clone from pre-generated bundles.
1384 1392 self.clonebundleattempted = False
1385 1393 # Set of file patterns to include.
1386 1394 self.includepats = includepats
1387 1395 # Set of file patterns to exclude.
1388 1396 self.excludepats = excludepats
1389 1397 # Number of ancestor changesets to pull from each pulled head.
1390 1398 self.depth = depth
1391 1399
1392 1400 @util.propertycache
1393 1401 def pulledsubset(self):
1394 1402 """heads of the set of changeset target by the pull"""
1395 1403 # compute target subset
1396 1404 if self.heads is None:
1397 1405 # We pulled everything possible,
1398 1406 # sync on everything common
1399 1407 c = set(self.common)
1400 1408 ret = list(self.common)
1401 1409 for n in self.rheads:
1402 1410 if n not in c:
1403 1411 ret.append(n)
1404 1412 return ret
1405 1413 else:
1406 1414 # We pulled a specific subset
1407 1415 # sync on this subset
1408 1416 return self.heads
1409 1417
1410 1418 @util.propertycache
1411 1419 def canusebundle2(self):
1412 1420 return not _forcebundle1(self)
1413 1421
1414 1422 @util.propertycache
1415 1423 def remotebundle2caps(self):
1416 1424 return bundle2.bundle2caps(self.remote)
1417 1425
1418 1426 def gettransaction(self):
1419 1427 # deprecated; talk to trmanager directly
1420 1428 return self.trmanager.transaction()
1421 1429
1422 1430 class transactionmanager(util.transactional):
1423 1431 """An object to manage the life cycle of a transaction
1424 1432
1425 1433 It creates the transaction on demand and calls the appropriate hooks when
1426 1434 closing the transaction."""
1427 1435 def __init__(self, repo, source, url):
1428 1436 self.repo = repo
1429 1437 self.source = source
1430 1438 self.url = url
1431 1439 self._tr = None
1432 1440
1433 1441 def transaction(self):
1434 1442 """Return an open transaction object, constructing if necessary"""
1435 1443 if not self._tr:
1436 1444 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1437 1445 self._tr = self.repo.transaction(trname)
1438 1446 self._tr.hookargs['source'] = self.source
1439 1447 self._tr.hookargs['url'] = self.url
1440 1448 return self._tr
1441 1449
1442 1450 def close(self):
1443 1451 """close transaction if created"""
1444 1452 if self._tr is not None:
1445 1453 self._tr.close()
1446 1454
1447 1455 def release(self):
1448 1456 """release transaction if created"""
1449 1457 if self._tr is not None:
1450 1458 self._tr.release()
1451 1459
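transactionmanager derives from util.transactional, so callers use it as a context manager: assuming util.transactional's usual behavior, close() runs on a clean exit and release() always runs. A hedged sketch (requires a live repo and remote):

```python
# Hypothetical use of transactionmanager above: the transaction itself is
# created lazily by transaction(), and the context-manager exit calls
# close() on success and release() unconditionally.
trmanager = transactionmanager(repo, 'pull', remote.url())
with trmanager:
    tr = trmanager.transaction()  # opens the repo transaction on demand
    # ... apply pulled data under tr ...
```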
1452 1460 def listkeys(remote, namespace):
1453 1461 with remote.commandexecutor() as e:
1454 1462 return e.callcommand('listkeys', {'namespace': namespace}).result()
1455 1463
1456 1464 def _fullpullbundle2(repo, pullop):
1457 1465 # The server may send a partial reply, i.e. when inlining
1458 1466 # pre-computed bundles. In that case, update the common
1459 1467 # set based on the results and pull another bundle.
1460 1468 #
1461 1469 # There are two indicators that the process is finished:
1462 1470 # - no changeset has been added, or
1463 1471 # - all remote heads are known locally.
1464 1472 # The head check must use the unfiltered view as obsolescence
1465 1473 # markers can hide heads.
1466 1474 unfi = repo.unfiltered()
1467 1475 unficl = unfi.changelog
1468 1476 def headsofdiff(h1, h2):
1469 1477 """Returns heads(h1 % h2)"""
1470 1478 res = unfi.set('heads(%ln %% %ln)', h1, h2)
1471 1479 return set(ctx.node() for ctx in res)
1472 1480 def headsofunion(h1, h2):
1473 1481 """Returns heads((h1 + h2) - null)"""
1474 1482 res = unfi.set('heads((%ln + %ln - null))', h1, h2)
1475 1483 return set(ctx.node() for ctx in res)
1476 1484 while True:
1477 1485 old_heads = unficl.heads()
1478 1486 clstart = len(unficl)
1479 1487 _pullbundle2(pullop)
1480 1488 if repository.NARROW_REQUIREMENT in repo.requirements:
1481 1489 # XXX narrow clones filter the heads on the server side during
1482 1490 # XXX getbundle and result in partial replies as well.
1483 1491 # XXX Disable pull bundles in this case as a band-aid to avoid
1484 1492 # XXX extra round trips.
1485 1493 break
1486 1494 if clstart == len(unficl):
1487 1495 break
1488 1496 if all(unficl.hasnode(n) for n in pullop.rheads):
1489 1497 break
1490 1498 new_heads = headsofdiff(unficl.heads(), old_heads)
1491 1499 pullop.common = headsofunion(new_heads, pullop.common)
1492 1500 pullop.rheads = set(pullop.rheads) - pullop.common
1493 1501
1494 1502 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1495 1503 streamclonerequested=None, includepats=None, excludepats=None,
1496 1504 depth=None):
1497 1505 """Fetch repository data from a remote.
1498 1506
1499 1507 This is the main function used to retrieve data from a remote repository.
1500 1508
1501 1509 ``repo`` is the local repository to clone into.
1502 1510 ``remote`` is a peer instance.
1503 1511 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1504 1512 default) means to pull everything from the remote.
1505 1513 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1506 1514 default, all remote bookmarks are pulled.
1507 1515 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1508 1516 initialization.
1509 1517 ``streamclonerequested`` is a boolean indicating whether a "streaming
1510 1518 clone" is requested. A "streaming clone" is essentially a raw file copy
1511 1519 of revlogs from the server. This only works when the local repository is
1512 1520 empty. The default value of ``None`` means to respect the server
1513 1521 configuration for preferring stream clones.
1514 1522 ``includepats`` and ``excludepats`` define explicit file patterns to
1515 1523 include and exclude in storage, respectively. If not defined, narrow
1516 1524 patterns from the repo instance are used, if available.
1517 1525 ``depth`` is an integer indicating the DAG depth of history we're
1518 1526 interested in. If defined, for each revision specified in ``heads``, we
1519 1527 will fetch up to this many of its ancestors and data associated with them.
1520 1528
1521 1529 Returns the ``pulloperation`` created for this pull.
1522 1530 """
1523 1531 if opargs is None:
1524 1532 opargs = {}
1525 1533
1526 1534 # We allow the narrow patterns to be passed in explicitly to provide more
1527 1535 # flexibility for API consumers.
1528 1536 if includepats or excludepats:
1529 1537 includepats = includepats or set()
1530 1538 excludepats = excludepats or set()
1531 1539 else:
1532 1540 includepats, excludepats = repo.narrowpats
1533 1541
1534 1542 narrowspec.validatepatterns(includepats)
1535 1543 narrowspec.validatepatterns(excludepats)
1536 1544
1537 1545 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1538 1546 streamclonerequested=streamclonerequested,
1539 1547 includepats=includepats, excludepats=excludepats,
1540 1548 depth=depth,
1541 1549 **pycompat.strkwargs(opargs))
1542 1550
1543 1551 peerlocal = pullop.remote.local()
1544 1552 if peerlocal:
1545 1553 missing = set(peerlocal.requirements) - pullop.repo.supported
1546 1554 if missing:
1547 1555 msg = _("required features are not"
1548 1556 " supported in the destination:"
1549 1557 " %s") % (', '.join(sorted(missing)))
1550 1558 raise error.Abort(msg)
1551 1559
1552 1560 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1553 1561 wlock = util.nullcontextmanager()
1554 1562 if not bookmod.bookmarksinstore(repo):
1555 1563 wlock = repo.wlock()
1556 1564 with wlock, repo.lock(), pullop.trmanager:
1557 1565 # Use the modern wire protocol, if available.
1558 1566 if remote.capable('command-changesetdata'):
1559 1567 exchangev2.pull(pullop)
1560 1568 else:
1561 1569 # This should ideally be in _pullbundle2(). However, it needs to run
1562 1570 # before discovery to avoid extra work.
1563 1571 _maybeapplyclonebundle(pullop)
1564 1572 streamclone.maybeperformlegacystreamclone(pullop)
1565 1573 _pulldiscovery(pullop)
1566 1574 if pullop.canusebundle2:
1567 1575 _fullpullbundle2(repo, pullop)
1568 1576 _pullchangeset(pullop)
1569 1577 _pullphase(pullop)
1570 1578 _pullbookmarks(pullop)
1571 1579 _pullobsolete(pullop)
1572 1580
1573 1581 # storing remotenames
1574 1582 if repo.ui.configbool('experimental', 'remotenames'):
1575 1583 logexchange.pullremotenames(repo, remote)
1576 1584
1577 1585 return pullop
1578 1586
1579 1587 # list of steps to perform discovery before pull
1580 1588 pulldiscoveryorder = []
1581 1589
1582 1590 # Mapping between step name and function
1583 1591 #
1584 1592 # This exists to help extensions wrap steps if necessary
1585 1593 pulldiscoverymapping = {}
1586 1594
1587 1595 def pulldiscovery(stepname):
1588 1596 """decorator for function performing discovery before pull
1589 1597
1590 1598 The function is added to the step -> function mapping and appended to the
1591 1599 list of steps. Beware that decorated functions will be added in order (this
1592 1600 may matter).
1593 1601
1594 1602 You can only use this decorator for a new step; if you want to wrap a step
1595 1603 from an extension, change the pulldiscoverymapping dictionary directly."""
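# A minimal sketch of registering a step (the step name and function body
# are hypothetical); step functions receive the ``pulloperation``:
#
#   @pulldiscovery('example-step')
#   def _examplediscovery(pullop):
#       pullop.repo.ui.debug('running example discovery step\n')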
1596 1604 def dec(func):
1597 1605 assert stepname not in pulldiscoverymapping
1598 1606 pulldiscoverymapping[stepname] = func
1599 1607 pulldiscoveryorder.append(stepname)
1600 1608 return func
1601 1609 return dec
1602 1610
1603 1611 def _pulldiscovery(pullop):
1604 1612 """Run all discovery steps"""
1605 1613 for stepname in pulldiscoveryorder:
1606 1614 step = pulldiscoverymapping[stepname]
1607 1615 step(pullop)
1608 1616
1609 1617 @pulldiscovery('b1:bookmarks')
1610 1618 def _pullbookmarkbundle1(pullop):
1611 1619 """fetch bookmark data in bundle1 case
1612 1620
1613 1621 If not using bundle2, we have to fetch bookmarks before changeset
1614 1622 discovery to reduce the chance and impact of race conditions."""
1615 1623 if pullop.remotebookmarks is not None:
1616 1624 return
1617 1625 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1618 1626 # all known bundle2 servers now support listkeys, but let's be nice to
1619 1627 # new implementations.
1620 1628 return
1621 1629 books = listkeys(pullop.remote, 'bookmarks')
1622 1630 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1623 1631
1624 1632
1625 1633 @pulldiscovery('changegroup')
1626 1634 def _pulldiscoverychangegroup(pullop):
1627 1635 """discovery phase for the pull
1628 1636
1629 1637 Currently handles changeset discovery only; it will handle all discovery
1630 1638 at some point."""
1631 1639 tmp = discovery.findcommonincoming(pullop.repo,
1632 1640 pullop.remote,
1633 1641 heads=pullop.heads,
1634 1642 force=pullop.force)
1635 1643 common, fetch, rheads = tmp
1636 1644 nm = pullop.repo.unfiltered().changelog.nodemap
1637 1645 if fetch and rheads:
1638 1646 # If a remote head is filtered locally, put it back in common.
1639 1647 #
1640 1648 # This is a hackish solution to catch most "common but locally
1641 1649 # hidden" situations. We do not perform discovery on the unfiltered
1642 1650 # repository because it ends up doing a pathological number of round
1643 1651 # trips for a huge amount of changesets we do not care about.
1644 1652 #
1645 1653 # If a set of such "common but filtered" changesets exists on the server
1646 1654 # but does not include a remote head, we will not be able to detect it.
1647 1655 scommon = set(common)
1648 1656 for n in rheads:
1649 1657 if n in nm:
1650 1658 if n not in scommon:
1651 1659 common.append(n)
1652 1660 if set(rheads).issubset(set(common)):
1653 1661 fetch = []
1654 1662 pullop.common = common
1655 1663 pullop.fetch = fetch
1656 1664 pullop.rheads = rheads
1657 1665
1658 1666 def _pullbundle2(pullop):
1659 1667 """pull data using bundle2
1660 1668
1661 1669 For now, the only supported data is the changegroup."""
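# The ``kwargs`` assembled below become the arguments of the ``getbundle``
# wire command; a hypothetical fully-populated example could look like:
#
#   {'bundlecaps': {...}, 'common': [...], 'heads': [...], 'cg': True,
#    'phases': True, 'listkeys': ['bookmarks'], 'obsmarkers': True}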
1662 1670 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1663 1671
1664 1672 # make ui easier to access
1665 1673 ui = pullop.repo.ui
1666 1674
1667 1675 # At the moment we don't do stream clones over bundle2. If that is
1668 1676 # implemented then here's where the check for that will go.
1669 1677 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1670 1678
1671 1679 # declare the pull perimeter (common and heads)
1672 1680 kwargs['common'] = pullop.common
1673 1681 kwargs['heads'] = pullop.heads or pullop.rheads
1674 1682
1675 1683 # check that the server supports narrow, then add includepats and excludepats
1676 1684 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1677 1685 if servernarrow and pullop.includepats:
1678 1686 kwargs['includepats'] = pullop.includepats
1679 1687 if servernarrow and pullop.excludepats:
1680 1688 kwargs['excludepats'] = pullop.excludepats
1681 1689
1682 1690 if streaming:
1683 1691 kwargs['cg'] = False
1684 1692 kwargs['stream'] = True
1685 1693 pullop.stepsdone.add('changegroup')
1686 1694 pullop.stepsdone.add('phases')
1687 1695
1688 1696 else:
1689 1697 # pulling changegroup
1690 1698 pullop.stepsdone.add('changegroup')
1691 1699
1692 1700 kwargs['cg'] = pullop.fetch
1693 1701
1694 1702 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1695 1703 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1696 1704 if (not legacyphase and hasbinaryphase):
1697 1705 kwargs['phases'] = True
1698 1706 pullop.stepsdone.add('phases')
1699 1707
1700 1708 if 'listkeys' in pullop.remotebundle2caps:
1701 1709 if 'phases' not in pullop.stepsdone:
1702 1710 kwargs['listkeys'] = ['phases']
1703 1711
1704 1712 bookmarksrequested = False
1705 1713 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1706 1714 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1707 1715
1708 1716 if pullop.remotebookmarks is not None:
1709 1717 pullop.stepsdone.add('request-bookmarks')
1710 1718
1711 1719 if ('request-bookmarks' not in pullop.stepsdone
1712 1720 and pullop.remotebookmarks is None
1713 1721 and not legacybookmark and hasbinarybook):
1714 1722 kwargs['bookmarks'] = True
1715 1723 bookmarksrequested = True
1716 1724
1717 1725 if 'listkeys' in pullop.remotebundle2caps:
1718 1726 if 'request-bookmarks' not in pullop.stepsdone:
1719 1727 # make sure to always include bookmark data when migrating
1720 1728 # `hg incoming --bundle` to using this function.
1721 1729 pullop.stepsdone.add('request-bookmarks')
1722 1730 kwargs.setdefault('listkeys', []).append('bookmarks')
1723 1731
1724 1732 # If this is a full pull / clone and the server supports the clone bundles
1725 1733 # feature, tell the server whether we attempted a clone bundle. The
1726 1734 # presence of this flag indicates the client supports clone bundles. This
1727 1735 # will enable the server to treat clients that support clone bundles
1728 1736 # differently from those that don't.
1729 1737 if (pullop.remote.capable('clonebundles')
1730 1738 and pullop.heads is None and list(pullop.common) == [nullid]):
1731 1739 kwargs['cbattempted'] = pullop.clonebundleattempted
1732 1740
1733 1741 if streaming:
1734 1742 pullop.repo.ui.status(_('streaming all changes\n'))
1735 1743 elif not pullop.fetch:
1736 1744 pullop.repo.ui.status(_("no changes found\n"))
1737 1745 pullop.cgresult = 0
1738 1746 else:
1739 1747 if pullop.heads is None and list(pullop.common) == [nullid]:
1740 1748 pullop.repo.ui.status(_("requesting all changes\n"))
1741 1749 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1742 1750 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1743 1751 if obsolete.commonversion(remoteversions) is not None:
1744 1752 kwargs['obsmarkers'] = True
1745 1753 pullop.stepsdone.add('obsmarkers')
1746 1754 _pullbundle2extraprepare(pullop, kwargs)
1747 1755
1748 1756 with pullop.remote.commandexecutor() as e:
1749 1757 args = dict(kwargs)
1750 1758 args['source'] = 'pull'
1751 1759 bundle = e.callcommand('getbundle', args).result()
1752 1760
1753 1761 try:
1754 1762 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
1755 1763 source='pull')
1756 1764 op.modes['bookmarks'] = 'records'
1757 1765 bundle2.processbundle(pullop.repo, bundle, op=op)
1758 1766 except bundle2.AbortFromPart as exc:
1759 1767 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1760 1768 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1761 1769 except error.BundleValueError as exc:
1762 1770 raise error.Abort(_('missing support for %s') % exc)
1763 1771
1764 1772 if pullop.fetch:
1765 1773 pullop.cgresult = bundle2.combinechangegroupresults(op)
1766 1774
1767 1775 # processing phases change
1768 1776 for namespace, value in op.records['listkeys']:
1769 1777 if namespace == 'phases':
1770 1778 _pullapplyphases(pullop, value)
1771 1779
1772 1780 # processing bookmark update
1773 1781 if bookmarksrequested:
1774 1782 books = {}
1775 1783 for record in op.records['bookmarks']:
1776 1784 books[record['bookmark']] = record["node"]
1777 1785 pullop.remotebookmarks = books
1778 1786 else:
1779 1787 for namespace, value in op.records['listkeys']:
1780 1788 if namespace == 'bookmarks':
1781 1789 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1782 1790
1783 1791 # bookmark data were either already there or pulled in the bundle
1784 1792 if pullop.remotebookmarks is not None:
1785 1793 _pullbookmarks(pullop)
1786 1794
1787 1795 def _pullbundle2extraprepare(pullop, kwargs):
1788 1796 """hook function so that extensions can extend the getbundle call"""
1789 1797
1790 1798 def _pullchangeset(pullop):
1791 1799 """pull changeset from unbundle into the local repo"""
1792 1800 # We delay opening the transaction as late as possible so we
1793 1801 # don't open a transaction for nothing and don't break future useful
1794 1802 # rollback calls
1795 1803 if 'changegroup' in pullop.stepsdone:
1796 1804 return
1797 1805 pullop.stepsdone.add('changegroup')
1798 1806 if not pullop.fetch:
1799 1807 pullop.repo.ui.status(_("no changes found\n"))
1800 1808 pullop.cgresult = 0
1801 1809 return
1802 1810 tr = pullop.gettransaction()
1803 1811 if pullop.heads is None and list(pullop.common) == [nullid]:
1804 1812 pullop.repo.ui.status(_("requesting all changes\n"))
1805 1813 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1806 1814 # issue1320, avoid a race if remote changed after discovery
1807 1815 pullop.heads = pullop.rheads
1808 1816
1809 1817 if pullop.remote.capable('getbundle'):
1810 1818 # TODO: get bundlecaps from remote
1811 1819 cg = pullop.remote.getbundle('pull', common=pullop.common,
1812 1820 heads=pullop.heads or pullop.rheads)
1813 1821 elif pullop.heads is None:
1814 1822 with pullop.remote.commandexecutor() as e:
1815 1823 cg = e.callcommand('changegroup', {
1816 1824 'nodes': pullop.fetch,
1817 1825 'source': 'pull',
1818 1826 }).result()
1819 1827
1820 1828 elif not pullop.remote.capable('changegroupsubset'):
1821 1829 raise error.Abort(_("partial pull cannot be done because "
1822 1830 "other repository doesn't support "
1823 1831 "changegroupsubset."))
1824 1832 else:
1825 1833 with pullop.remote.commandexecutor() as e:
1826 1834 cg = e.callcommand('changegroupsubset', {
1827 1835 'bases': pullop.fetch,
1828 1836 'heads': pullop.heads,
1829 1837 'source': 'pull',
1830 1838 }).result()
1831 1839
1832 1840 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1833 1841 pullop.remote.url())
1834 1842 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1835 1843
1836 1844 def _pullphase(pullop):
1837 1845 # Get remote phases data from remote
1838 1846 if 'phases' in pullop.stepsdone:
1839 1847 return
1840 1848 remotephases = listkeys(pullop.remote, 'phases')
1841 1849 _pullapplyphases(pullop, remotephases)
1842 1850
1843 1851 def _pullapplyphases(pullop, remotephases):
1844 1852 """apply phase movement from observed remote state"""
1845 1853 if 'phases' in pullop.stepsdone:
1846 1854 return
1847 1855 pullop.stepsdone.add('phases')
1848 1856 publishing = bool(remotephases.get('publishing', False))
1849 1857 if remotephases and not publishing:
1850 1858 # remote is new and non-publishing
1851 1859 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1852 1860 pullop.pulledsubset,
1853 1861 remotephases)
1854 1862 dheads = pullop.pulledsubset
1855 1863 else:
1856 1864 # Remote is old or publishing; all common changesets
1857 1865 # should be seen as public
1858 1866 pheads = pullop.pulledsubset
1859 1867 dheads = []
1860 1868 unfi = pullop.repo.unfiltered()
1861 1869 phase = unfi._phasecache.phase
1862 1870 rev = unfi.changelog.nodemap.get
1863 1871 public = phases.public
1864 1872 draft = phases.draft
1865 1873
1866 1874 # exclude changesets already public locally and update the others
1867 1875 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1868 1876 if pheads:
1869 1877 tr = pullop.gettransaction()
1870 1878 phases.advanceboundary(pullop.repo, tr, public, pheads)
1871 1879
1872 1880 # exclude changesets already draft locally and update the others
1873 1881 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1874 1882 if dheads:
1875 1883 tr = pullop.gettransaction()
1876 1884 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1877 1885
1878 1886 def _pullbookmarks(pullop):
1879 1887 """process the remote bookmark information to update the local one"""
1880 1888 if 'bookmarks' in pullop.stepsdone:
1881 1889 return
1882 1890 pullop.stepsdone.add('bookmarks')
1883 1891 repo = pullop.repo
1884 1892 remotebookmarks = pullop.remotebookmarks
1885 1893 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1886 1894 pullop.remote.url(),
1887 1895 pullop.gettransaction,
1888 1896 explicit=pullop.explicitbookmarks)
1889 1897
1890 1898 def _pullobsolete(pullop):
1891 1899 """utility function to pull obsolete markers from a remote
1892 1900
1893 1901 `gettransaction` is a function that returns the pull transaction, creating
1894 1902 one if necessary. We return the transaction to inform the calling code that
1895 1903 a new transaction has been created (when applicable).
1896 1904 
1897 1905 Exists mostly to allow overriding for experimentation purposes"""
1898 1906 if 'obsmarkers' in pullop.stepsdone:
1899 1907 return
1900 1908 pullop.stepsdone.add('obsmarkers')
1901 1909 tr = None
1902 1910 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1903 1911 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1904 1912 remoteobs = listkeys(pullop.remote, 'obsolete')
1905 1913 if 'dump0' in remoteobs:
1906 1914 tr = pullop.gettransaction()
1907 1915 markers = []
1908 1916 for key in sorted(remoteobs, reverse=True):
1909 1917 if key.startswith('dump'):
1910 1918 data = util.b85decode(remoteobs[key])
1911 1919 version, newmarks = obsolete._readmarkers(data)
1912 1920 markers += newmarks
1913 1921 if markers:
1914 1922 pullop.repo.obsstore.add(tr, markers)
1915 1923 pullop.repo.invalidatevolatilesets()
1916 1924 return tr
1917 1925
1918 1926 def applynarrowacl(repo, kwargs):
1919 1927 """Apply narrow fetch access control.
1920 1928
1921 1929 This massages the named arguments for getbundle wire protocol commands
1922 1930 so requested data is filtered through access control rules.
1923 1931 """
1924 1932 ui = repo.ui
1925 1933 # TODO this assumes existence of HTTP and is a layering violation.
1926 1934 username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
1927 1935 user_includes = ui.configlist(
1928 1936 _NARROWACL_SECTION, username + '.includes',
1929 1937 ui.configlist(_NARROWACL_SECTION, 'default.includes'))
1930 1938 user_excludes = ui.configlist(
1931 1939 _NARROWACL_SECTION, username + '.excludes',
1932 1940 ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
1933 1941 if not user_includes:
1934 1942 raise error.Abort(_("{} configuration for user {} is empty")
1935 1943 .format(_NARROWACL_SECTION, username))
1936 1944
1937 1945 user_includes = [
1938 1946 'path:.' if p == '*' else 'path:' + p for p in user_includes]
1939 1947 user_excludes = [
1940 1948 'path:.' if p == '*' else 'path:' + p for p in user_excludes]
1941 1949
1942 1950 req_includes = set(kwargs.get(r'includepats', []))
1943 1951 req_excludes = set(kwargs.get(r'excludepats', []))
1944 1952
1945 1953 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
1946 1954 req_includes, req_excludes, user_includes, user_excludes)
1947 1955
1948 1956 if invalid_includes:
1949 1957 raise error.Abort(
1950 1958 _("The following includes are not accessible for {}: {}")
1951 1959 .format(username, invalid_includes))
1952 1960
1953 1961 new_args = {}
1954 1962 new_args.update(kwargs)
1955 1963 new_args[r'narrow'] = True
1956 1964 new_args[r'narrow_acl'] = True
1957 1965 new_args[r'includepats'] = req_includes
1958 1966 if req_excludes:
1959 1967 new_args[r'excludepats'] = req_excludes
1960 1968
1961 1969 return new_args
1962 1970
1963 1971 def _computeellipsis(repo, common, heads, known, match, depth=None):
1964 1972 """Compute the shape of a narrowed DAG.
1965 1973
1966 1974 Args:
1967 1975 repo: The repository we're transferring.
1968 1976 common: The roots of the DAG range we're transferring.
1969 1977 May be just [nullid], which means all ancestors of heads.
1970 1978 heads: The heads of the DAG range we're transferring.
1971 1979 match: The narrowmatcher that allows us to identify relevant changes.
1972 1980 depth: If not None, only consider nodes to be full nodes if they are at
1973 1981 most depth changesets away from one of heads.
1974 1982
1975 1983 Returns:
1976 1984 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
1977 1985
1978 1986 visitnodes: The list of nodes (either full or ellipsis) which
1979 1987 need to be sent to the client.
1980 1988 relevant_nodes: The set of changelog nodes which change a file inside
1981 1989 the narrowspec. The client needs these as non-ellipsis nodes.
1982 1990 ellipsisroots: A dict of {rev: parents} that is used in
1983 1991 narrowchangegroup to produce ellipsis nodes with the
1984 1992 correct parents.
1985 1993 """
1986 1994 cl = repo.changelog
1987 1995 mfl = repo.manifestlog
1988 1996
1989 1997 clrev = cl.rev
1990 1998
1991 1999 commonrevs = {clrev(n) for n in common} | {nullrev}
1992 2000 headsrevs = {clrev(n) for n in heads}
1993 2001
1994 2002 if depth:
1995 2003 revdepth = {h: 0 for h in headsrevs}
1996 2004
1997 2005 ellipsisheads = collections.defaultdict(set)
1998 2006 ellipsisroots = collections.defaultdict(set)
1999 2007
2000 2008 def addroot(head, curchange):
2001 2009 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2002 2010 ellipsisroots[head].add(curchange)
2003 2011 # Recursively split ellipsis heads with 3 roots by finding the
2004 2012 # roots' youngest common descendant which is an elided merge commit.
2005 2013 # That descendant takes 2 of the 3 roots as its own, and becomes a
2006 2014 # root of the head.
2007 2015 while len(ellipsisroots[head]) > 2:
2008 2016 child, roots = splithead(head)
2009 2017 splitroots(head, child, roots)
2010 2018 head = child # Recurse in case we just added a 3rd root
2011 2019
2012 2020 def splitroots(head, child, roots):
2013 2021 ellipsisroots[head].difference_update(roots)
2014 2022 ellipsisroots[head].add(child)
2015 2023 ellipsisroots[child].update(roots)
2016 2024 ellipsisroots[child].discard(child)
2017 2025
2018 2026 def splithead(head):
2019 2027 r1, r2, r3 = sorted(ellipsisroots[head])
2020 2028 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2021 2029 mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
2022 2030 nr1, head, nr2, head)
2023 2031 for j in mid:
2024 2032 if j == nr2:
2025 2033 return nr2, (nr1, nr2)
2026 2034 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2027 2035 return j, (nr1, nr2)
2028 2036 raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
2029 2037 'roots: %d %d %d') % (head, r1, r2, r3))
2030 2038
2031 2039 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2032 2040 visit = reversed(missing)
2033 2041 relevant_nodes = set()
2034 2042 visitnodes = [cl.node(m) for m in missing]
2035 2043 required = set(headsrevs) | known
2036 2044 for rev in visit:
2037 2045 clrev = cl.changelogrevision(rev)
2038 2046 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2039 2047 if depth is not None:
2040 2048 curdepth = revdepth[rev]
2041 2049 for p in ps:
2042 2050 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2043 2051 needed = False
2044 2052 shallow_enough = depth is None or revdepth[rev] <= depth
2045 2053 if shallow_enough:
2046 2054 curmf = mfl[clrev.manifest].read()
2047 2055 if ps:
2048 2056 # We choose to not trust the changed files list in
2049 2057 # changesets because it's not always correct. TODO: could
2050 2058 # we trust it for the non-merge case?
2051 2059 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2052 2060 needed = bool(curmf.diff(p1mf, match))
2053 2061 if not needed and len(ps) > 1:
2054 2062 # For merge changes, the list of changed files is not
2055 2063 # helpful, since we need to emit the merge if a file
2056 2064 # in the narrow spec has changed on either side of the
2057 2065 # merge. As a result, we do a manifest diff to check.
2058 2066 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2059 2067 needed = bool(curmf.diff(p2mf, match))
2060 2068 else:
2061 2069 # For a root node, we need to include the node if any
2062 2070 # files in the node match the narrowspec.
2063 2071 needed = any(curmf.walk(match))
2064 2072
2065 2073 if needed:
2066 2074 for head in ellipsisheads[rev]:
2067 2075 addroot(head, rev)
2068 2076 for p in ps:
2069 2077 required.add(p)
2070 2078 relevant_nodes.add(cl.node(rev))
2071 2079 else:
2072 2080 if not ps:
2073 2081 ps = [nullrev]
2074 2082 if rev in required:
2075 2083 for head in ellipsisheads[rev]:
2076 2084 addroot(head, rev)
2077 2085 for p in ps:
2078 2086 ellipsisheads[p].add(rev)
2079 2087 else:
2080 2088 for p in ps:
2081 2089 ellipsisheads[p] |= ellipsisheads[rev]
2082 2090
2083 2091 # add common changesets as roots of their reachable ellipsis heads
2084 2092 for c in commonrevs:
2085 2093 for head in ellipsisheads[c]:
2086 2094 addroot(head, c)
2087 2095 return visitnodes, relevant_nodes, ellipsisroots
2088 2096
2089 2097 def caps20to10(repo, role):
2090 2098 """return a set with appropriate options to use bundle20 during getbundle"""
2091 2099 caps = {'HG20'}
2092 2100 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2093 2101 caps.add('bundle2=' + urlreq.quote(capsblob))
2094 2102 return caps
2095 2103
2096 2104 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2097 2105 getbundle2partsorder = []
2098 2106
2099 2107 # Mapping between step name and function
2100 2108 #
2101 2109 # This exists to help extensions wrap steps if necessary
2102 2110 getbundle2partsmapping = {}
2103 2111
2104 2112 def getbundle2partsgenerator(stepname, idx=None):
2105 2113 """decorator for function generating bundle2 part for getbundle
2106 2114
2107 2115 The function is added to the step -> function mapping and appended to the
2108 2116 list of steps. Beware that decorated functions will be added in order
2109 2117 (this may matter).
2110 2118
2111 2119 You can only use this decorator for new steps; if you want to wrap a step
2112 2120 from an extension, change the getbundle2partsmapping dictionary directly."""
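# A minimal sketch of adding a part generator (the step name and part type
# are hypothetical); ``idx`` can be passed to insert the step at a specific
# position in ``getbundle2partsorder``:
#
#   @getbundle2partsgenerator('example-part')
#   def _getbundleexamplepart(bundler, repo, source, bundlecaps=None,
#                             b2caps=None, **kwargs):
#       bundler.newpart('example-part', data='payload')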
2113 2121 def dec(func):
2114 2122 assert stepname not in getbundle2partsmapping
2115 2123 getbundle2partsmapping[stepname] = func
2116 2124 if idx is None:
2117 2125 getbundle2partsorder.append(stepname)
2118 2126 else:
2119 2127 getbundle2partsorder.insert(idx, stepname)
2120 2128 return func
2121 2129 return dec
2122 2130
2123 2131 def bundle2requested(bundlecaps):
2124 2132 if bundlecaps is not None:
2125 2133 return any(cap.startswith('HG2') for cap in bundlecaps)
2126 2134 return False
2127 2135
2128 2136 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
2129 2137 **kwargs):
2130 2138 """Return chunks constituting a bundle's raw data.
2131 2139
2132 2140 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2133 2141 passed.
2134 2142
2135 2143 Returns a 2-tuple of a dict with metadata about the generated bundle
2136 2144 and an iterator over raw chunks (of varying sizes).
2137 2145 """
2138 2146 kwargs = pycompat.byteskwargs(kwargs)
2139 2147 info = {}
2140 2148 usebundle2 = bundle2requested(bundlecaps)
2141 2149 # bundle10 case
2142 2150 if not usebundle2:
2143 2151 if bundlecaps and not kwargs.get('cg', True):
2144 2152 raise ValueError(_('request for bundle10 must include changegroup'))
2145 2153
2146 2154 if kwargs:
2147 2155 raise ValueError(_('unsupported getbundle arguments: %s')
2148 2156 % ', '.join(sorted(kwargs.keys())))
2149 2157 outgoing = _computeoutgoing(repo, heads, common)
2150 2158 info['bundleversion'] = 1
2151 2159 return info, changegroup.makestream(repo, outgoing, '01', source,
2152 2160 bundlecaps=bundlecaps)
2153 2161
2154 2162 # bundle20 case
2155 2163 info['bundleversion'] = 2
2156 2164 b2caps = {}
2157 2165 for bcaps in bundlecaps:
2158 2166 if bcaps.startswith('bundle2='):
2159 2167 blob = urlreq.unquote(bcaps[len('bundle2='):])
2160 2168 b2caps.update(bundle2.decodecaps(blob))
2161 2169 bundler = bundle2.bundle20(repo.ui, b2caps)
2162 2170
2163 2171 kwargs['heads'] = heads
2164 2172 kwargs['common'] = common
2165 2173
2166 2174 for name in getbundle2partsorder:
2167 2175 func = getbundle2partsmapping[name]
2168 2176 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
2169 2177 **pycompat.strkwargs(kwargs))
2170 2178
2171 2179 info['prefercompressed'] = bundler.prefercompressed
2172 2180
2173 2181 return info, bundler.getchunks()
2174 2182
2175 2183 @getbundle2partsgenerator('stream2')
2176 2184 def _getbundlestream2(bundler, repo, *args, **kwargs):
2177 2185 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2178 2186
2179 2187 @getbundle2partsgenerator('changegroup')
2180 2188 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
2181 2189 b2caps=None, heads=None, common=None, **kwargs):
2182 2190 """add a changegroup part to the requested bundle"""
2183 2191 if not kwargs.get(r'cg', True):
2184 2192 return
2185 2193
2186 2194 version = '01'
2187 2195 cgversions = b2caps.get('changegroup')
2188 2196 if cgversions: # 3.1 and 3.2 ship with an empty value
2189 2197 cgversions = [v for v in cgversions
2190 2198 if v in changegroup.supportedoutgoingversions(repo)]
2191 2199 if not cgversions:
2192 2200 raise error.Abort(_('no common changegroup version'))
2193 2201 version = max(cgversions)
2194 2202
2195 2203 outgoing = _computeoutgoing(repo, heads, common)
2196 2204 if not outgoing.missing:
2197 2205 return
2198 2206
2199 2207 if kwargs.get(r'narrow', False):
2200 2208 include = sorted(filter(bool, kwargs.get(r'includepats', [])))
2201 2209 exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
2202 2210 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2203 2211 else:
2204 2212 matcher = None
2205 2213
2206 2214 cgstream = changegroup.makestream(repo, outgoing, version, source,
2207 2215 bundlecaps=bundlecaps, matcher=matcher)
2208 2216
2209 2217 part = bundler.newpart('changegroup', data=cgstream)
2210 2218 if cgversions:
2211 2219 part.addparam('version', version)
2212 2220
2213 2221 part.addparam('nbchanges', '%d' % len(outgoing.missing),
2214 2222 mandatory=False)
2215 2223
2216 2224 if 'treemanifest' in repo.requirements:
2217 2225 part.addparam('treemanifest', '1')
2218 2226
2219 2227 if (kwargs.get(r'narrow', False) and kwargs.get(r'narrow_acl', False)
2220 2228 and (include or exclude)):
2221 2229 # this is mandatory because otherwise ACL clients won't work
2222 2230 narrowspecpart = bundler.newpart('Narrow:responsespec')
2223 2231 narrowspecpart.data = '%s\0%s' % ('\n'.join(include),
2224 2232 '\n'.join(exclude))
2225 2233
2226 2234 @getbundle2partsgenerator('bookmarks')
2227 2235 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
2228 2236 b2caps=None, **kwargs):
2229 2237 """add a bookmark part to the requested bundle"""
2230 2238 if not kwargs.get(r'bookmarks', False):
2231 2239 return
2232 2240 if 'bookmarks' not in b2caps:
2233 2241 raise error.Abort(_('no common bookmarks exchange method'))
2234 2242 books = bookmod.listbinbookmarks(repo)
2235 2243 data = bookmod.binaryencode(books)
2236 2244 if data:
2237 2245 bundler.newpart('bookmarks', data=data)
2238 2246
2239 2247 @getbundle2partsgenerator('listkeys')
2240 2248 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
2241 2249 b2caps=None, **kwargs):
2242 2250 """add parts containing listkeys namespaces to the requested bundle"""
2243 2251 listkeys = kwargs.get(r'listkeys', ())
2244 2252 for namespace in listkeys:
2245 2253 part = bundler.newpart('listkeys')
2246 2254 part.addparam('namespace', namespace)
2247 2255 keys = repo.listkeys(namespace).items()
2248 2256 part.data = pushkey.encodekeys(keys)
2249 2257
2250 2258 @getbundle2partsgenerator('obsmarkers')
2251 2259 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
2252 2260 b2caps=None, heads=None, **kwargs):
2253 2261 """add an obsolescence markers part to the requested bundle"""
2254 2262 if kwargs.get(r'obsmarkers', False):
2255 2263 if heads is None:
2256 2264 heads = repo.heads()
2257 2265 subset = [c.node() for c in repo.set('::%ln', heads)]
2258 2266 markers = repo.obsstore.relevantmarkers(subset)
2259 2267 markers = sorted(markers)
2260 2268 bundle2.buildobsmarkerspart(bundler, markers)
2261 2269
2262 2270 @getbundle2partsgenerator('phases')
2263 2271 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
2264 2272 b2caps=None, heads=None, **kwargs):
2265 2273 """add phase heads part to the requested bundle"""
2266 2274 if kwargs.get(r'phases', False):
2267 2275 if 'heads' not in b2caps.get('phases'):
2268 2276 raise error.Abort(_('no common phases exchange method'))
2269 2277 if heads is None:
2270 2278 heads = repo.heads()
2271 2279
2272 2280 headsbyphase = collections.defaultdict(set)
2273 2281 if repo.publishing():
2274 2282 headsbyphase[phases.public] = heads
2275 2283 else:
2276 2284 # find the appropriate heads to move
2277 2285
2278 2286 phase = repo._phasecache.phase
2279 2287 node = repo.changelog.node
2280 2288 rev = repo.changelog.rev
2281 2289 for h in heads:
2282 2290 headsbyphase[phase(repo, rev(h))].add(h)
2283 2291 seenphases = list(headsbyphase.keys())
2284 2292
2285 2293 # We do not handle anything but public and draft phases for now
2286 2294 if seenphases:
2287 2295 assert max(seenphases) <= phases.draft
2288 2296
2289 2297 # if client is pulling non-public changesets, we need to find
2290 2298 # intermediate public heads.
2291 2299 draftheads = headsbyphase.get(phases.draft, set())
2292 2300 if draftheads:
2293 2301 publicheads = headsbyphase.get(phases.public, set())
2294 2302
2295 2303 revset = 'heads(only(%ln, %ln) and public())'
2296 2304 extraheads = repo.revs(revset, draftheads, publicheads)
2297 2305 for r in extraheads:
2298 2306 headsbyphase[phases.public].add(node(r))
2299 2307
2300 2308 # transform data in a format used by the encoding function
2301 2309 phasemapping = []
2302 2310 for phase in phases.allphases:
2303 2311 phasemapping.append(sorted(headsbyphase[phase]))
2304 2312
2305 2313 # generate the actual part
2306 2314 phasedata = phases.binaryencode(phasemapping)
2307 2315 bundler.newpart('phase-heads', data=phasedata)
2308 2316
2309 2317 @getbundle2partsgenerator('hgtagsfnodes')
2310 2318 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
2311 2319 b2caps=None, heads=None, common=None,
2312 2320 **kwargs):
2313 2321 """Transfer the .hgtags filenodes mapping.
2314 2322
2315 2323 Only values for heads in this bundle will be transferred.
2316 2324
2317 2325 The part data consists of pairs of 20 byte changeset node and .hgtags
2318 2326 filenodes raw values.
2319 2327 """
2320 2328 # Don't send unless:
2321 2329 # - changesets are being exchanged,
2322 2330 # - the client supports it.
2323 2331 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
2324 2332 return
2325 2333
2326 2334 outgoing = _computeoutgoing(repo, heads, common)
2327 2335 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2328 2336
2329 2337 @getbundle2partsgenerator('cache:rev-branch-cache')
2330 2338 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
2331 2339 b2caps=None, heads=None, common=None,
2332 2340 **kwargs):
2333 2341 """Transfer the rev-branch-cache mapping
2334 2342
2335 2343 The payload is a series of data related to each branch
2336 2344
2337 2345 1) branch name length
2338 2346 2) number of open heads
2339 2347 3) number of closed heads
2340 2348 4) open heads nodes
2341 2349 5) closed heads nodes
2342 2350 """
2343 2351 # Don't send unless:
2344 2352 # - changesets are being exchanged,
2345 2353 # - the client supports it.
2346 2354 # - narrow bundle isn't in play (not currently compatible).
2347 2355 if (not kwargs.get(r'cg', True)
2348 2356 or 'rev-branch-cache' not in b2caps
2349 2357 or kwargs.get(r'narrow', False)
2350 2358 or repo.ui.has_section(_NARROWACL_SECTION)):
2351 2359 return
2352 2360
2353 2361 outgoing = _computeoutgoing(repo, heads, common)
2354 2362 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2355 2363
2356 2364 def check_heads(repo, their_heads, context):
2357 2365 """check if the heads of a repo have been modified
2358 2366
2359 2367 Used by peer for unbundling.
2360 2368 """
2361 2369 heads = repo.heads()
2362 2370 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
2363 2371 if not (their_heads == ['force'] or their_heads == heads or
2364 2372 their_heads == ['hashed', heads_hash]):
2365 2373 # someone else committed/pushed/unbundled while we
2366 2374 # were transferring data
2367 2375 raise error.PushRaced('repository changed while %s - '
2368 2376 'please try again' % context)
2369 2377
2370 2378 def unbundle(repo, cg, heads, source, url):
2371 2379 """Apply a bundle to a repo.
2372 2380
2373 2381 This function makes sure the repo is locked during the application and has
2374 2382 a mechanism to check that no push race occurred between the creation of the
2375 2383 bundle and its application.
2376 2384 
2377 2385 If the push was raced, a PushRaced exception is raised."""
2378 2386 r = 0
2379 2387 # need a transaction when processing a bundle2 stream
2380 2388 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2381 2389 lockandtr = [None, None, None]
2382 2390 recordout = None
2383 2391 # quick fix for output mismatch with bundle2 in 3.4
2384 2392 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2385 2393 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2386 2394 captureoutput = True
2387 2395 try:
2388 2396 # note: outside bundle1, 'heads' is expected to be empty and this
2389 2397 # 'check_heads' call will be a no-op
2390 2398 check_heads(repo, heads, 'uploading changes')
2391 2399 # push can proceed
2392 2400 if not isinstance(cg, bundle2.unbundle20):
2393 2401 # legacy case: bundle1 (changegroup 01)
2394 2402 txnname = "\n".join([source, util.hidepassword(url)])
2395 2403 with repo.lock(), repo.transaction(txnname) as tr:
2396 2404 op = bundle2.applybundle(repo, cg, tr, source, url)
2397 2405 r = bundle2.combinechangegroupresults(op)
2398 2406 else:
2399 2407 r = None
2400 2408 try:
2401 2409 def gettransaction():
2402 2410 if not lockandtr[2]:
2403 2411 if not bookmod.bookmarksinstore(repo):
2404 2412 lockandtr[0] = repo.wlock()
2405 2413 lockandtr[1] = repo.lock()
2406 2414 lockandtr[2] = repo.transaction(source)
2407 2415 lockandtr[2].hookargs['source'] = source
2408 2416 lockandtr[2].hookargs['url'] = url
2409 2417 lockandtr[2].hookargs['bundle2'] = '1'
2410 2418 return lockandtr[2]
2411 2419
2412 2420 # Do greedy locking by default until we're satisfied with lazy
2413 2421 # locking.
2414 2422 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2415 2423 gettransaction()
2416 2424
2417 2425 op = bundle2.bundleoperation(repo, gettransaction,
2418 2426 captureoutput=captureoutput,
2419 2427 source='push')
2420 2428 try:
2421 2429 op = bundle2.processbundle(repo, cg, op=op)
2422 2430 finally:
2423 2431 r = op.reply
2424 2432 if captureoutput and r is not None:
2425 2433 repo.ui.pushbuffer(error=True, subproc=True)
2426 2434 def recordout(output):
2427 2435 r.newpart('output', data=output, mandatory=False)
2428 2436 if lockandtr[2] is not None:
2429 2437 lockandtr[2].close()
2430 2438 except BaseException as exc:
2431 2439 exc.duringunbundle2 = True
2432 2440 if captureoutput and r is not None:
2433 2441 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2434 2442 def recordout(output):
2435 2443 part = bundle2.bundlepart('output', data=output,
2436 2444 mandatory=False)
2437 2445 parts.append(part)
2438 2446 raise
2439 2447 finally:
2440 2448 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2441 2449 if recordout is not None:
2442 2450 recordout(repo.ui.popbuffer())
2443 2451 return r
2444 2452
2445 2453 def _maybeapplyclonebundle(pullop):
2446 2454 """Apply a clone bundle from a remote, if possible."""
2447 2455
2448 2456 repo = pullop.repo
2449 2457 remote = pullop.remote
2450 2458
2451 2459 if not repo.ui.configbool('ui', 'clonebundles'):
2452 2460 return
2453 2461
2454 2462 # Only run if local repo is empty.
2455 2463 if len(repo):
2456 2464 return
2457 2465
2458 2466 if pullop.heads:
2459 2467 return
2460 2468
2461 2469 if not remote.capable('clonebundles'):
2462 2470 return
2463 2471
2464 2472 with remote.commandexecutor() as e:
2465 2473 res = e.callcommand('clonebundles', {}).result()
2466 2474
2467 2475 # If we call the wire protocol command, that's good enough to record the
2468 2476 # attempt.
2469 2477 pullop.clonebundleattempted = True
2470 2478
2471 2479 entries = parseclonebundlesmanifest(repo, res)
2472 2480 if not entries:
2473 2481 repo.ui.note(_('no clone bundles available on remote; '
2474 2482 'falling back to regular clone\n'))
2475 2483 return
2476 2484
2477 2485 entries = filterclonebundleentries(
2478 2486 repo, entries, streamclonerequested=pullop.streamclonerequested)
2479 2487
2480 2488 if not entries:
2481 2489 # There is a thundering herd concern here. However, if a server
2482 2490 # operator doesn't advertise bundles appropriate for its clients,
2483 2491 # they deserve what's coming. Furthermore, from a client's
2484 2492 # perspective, no automatic fallback would mean not being able to
2485 2493 # clone!
2486 2494 repo.ui.warn(_('no compatible clone bundles available on server; '
2487 2495 'falling back to regular clone\n'))
2488 2496 repo.ui.warn(_('(you may want to report this to the server '
2489 2497 'operator)\n'))
2490 2498 return
2491 2499
2492 2500 entries = sortclonebundleentries(repo.ui, entries)
2493 2501
2494 2502 url = entries[0]['URL']
2495 2503 repo.ui.status(_('applying clone bundle from %s\n') % url)
2496 2504 if trypullbundlefromurl(repo.ui, repo, url):
2497 2505 repo.ui.status(_('finished applying clone bundle\n'))
2498 2506 # Bundle failed.
2499 2507 #
2500 2508 # We abort by default to avoid the thundering herd of
2501 2509 # clients flooding a server that was expecting expensive
2502 2510 # clone load to be offloaded.
2503 2511 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2504 2512 repo.ui.warn(_('falling back to normal clone\n'))
2505 2513 else:
2506 2514 raise error.Abort(_('error applying bundle'),
2507 2515 hint=_('if this error persists, consider contacting '
2508 2516 'the server operator or disable clone '
2509 2517 'bundles via '
2510 2518 '"--config ui.clonebundles=false"'))
2511 2519
2512 2520 def parseclonebundlesmanifest(repo, s):
2513 2521 """Parses the raw text of a clone bundles manifest.
2514 2522
2515 2523 Returns a list of dicts. Each dict has a ``URL`` key corresponding
2516 2524 to the URL; the other keys are the attributes for the entry.
2517 2525 """
2518 2526 m = []
2519 2527 for line in s.splitlines():
2520 2528 fields = line.split()
2521 2529 if not fields:
2522 2530 continue
2523 2531 attrs = {'URL': fields[0]}
2524 2532 for rawattr in fields[1:]:
2525 2533 key, value = rawattr.split('=', 1)
2526 2534 key = urlreq.unquote(key)
2527 2535 value = urlreq.unquote(value)
2528 2536 attrs[key] = value
2529 2537
2530 2538 # Parse BUNDLESPEC into components. This makes client-side
2531 2539 # preferences easier to specify since you can prefer a single
2532 2540 # component of the BUNDLESPEC.
2533 2541 if key == 'BUNDLESPEC':
2534 2542 try:
2535 2543 bundlespec = parsebundlespec(repo, value)
2536 2544 attrs['COMPRESSION'] = bundlespec.compression
2537 2545 attrs['VERSION'] = bundlespec.version
2538 2546 except error.InvalidBundleSpecification:
2539 2547 pass
2540 2548 except error.UnsupportedBundleSpecification:
2541 2549 pass
2542 2550
2543 2551 m.append(attrs)
2544 2552
2545 2553 return m
2546 2554
2547 2555 def isstreamclonespec(bundlespec):
2548 2556 # Stream clone v1
2549 2557 if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
2550 2558 return True
2551 2559
2552 2560 # Stream clone v2
2553 2561 if (bundlespec.wirecompression == 'UN' and
2554 2562 bundlespec.wireversion == '02' and
2555 2563 bundlespec.contentopts.get('streamv2')):
2556 2564 return True
2557 2565
2558 2566 return False
2559 2567
2560 2568 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2561 2569 """Remove incompatible clone bundle manifest entries.
2562 2570
2563 2571 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2564 2572 and returns a new list consisting of only the entries that this client
2565 2573 should be able to apply.
2566 2574
2567 2575 There is no guarantee we'll be able to apply all returned entries because
2568 2576 the metadata we use to filter on may be missing or wrong.
2569 2577 """
2570 2578 newentries = []
2571 2579 for entry in entries:
2572 2580 spec = entry.get('BUNDLESPEC')
2573 2581 if spec:
2574 2582 try:
2575 2583 bundlespec = parsebundlespec(repo, spec, strict=True)
2576 2584
2577 2585 # If a stream clone was requested, filter out non-streamclone
2578 2586 # entries.
2579 2587 if streamclonerequested and not isstreamclonespec(bundlespec):
2580 2588 repo.ui.debug('filtering %s because not a stream clone\n' %
2581 2589 entry['URL'])
2582 2590 continue
2583 2591
2584 2592 except error.InvalidBundleSpecification as e:
2585 2593 repo.ui.debug(stringutil.forcebytestr(e) + '\n')
2586 2594 continue
2587 2595 except error.UnsupportedBundleSpecification as e:
2588 2596 repo.ui.debug('filtering %s because unsupported bundle '
2589 2597 'spec: %s\n' % (
2590 2598 entry['URL'], stringutil.forcebytestr(e)))
2591 2599 continue
2592 2600 # If we don't have a spec and requested a stream clone, we don't know
2593 2601 # what the entry is so don't attempt to apply it.
2594 2602 elif streamclonerequested:
2595 2603 repo.ui.debug('filtering %s because cannot determine if a stream '
2596 2604 'clone bundle\n' % entry['URL'])
2597 2605 continue
2598 2606
2599 2607 if 'REQUIRESNI' in entry and not sslutil.hassni:
2600 2608 repo.ui.debug('filtering %s because SNI not supported\n' %
2601 2609 entry['URL'])
2602 2610 continue
2603 2611
2604 2612 newentries.append(entry)
2605 2613
2606 2614 return newentries
2607 2615
2608 2616 class clonebundleentry(object):
2609 2617 """Represents an item in a clone bundles manifest.
2610 2618
2611 2619 This rich class is needed to support sorting since sorted() in Python 3
2612 2620 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2613 2621 won't work.
2614 2622 """
2615 2623
2616 2624 def __init__(self, value, prefers):
2617 2625 self.value = value
2618 2626 self.prefers = prefers
2619 2627
2620 2628 def _cmp(self, other):
2621 2629 for prefkey, prefvalue in self.prefers:
2622 2630 avalue = self.value.get(prefkey)
2623 2631 bvalue = other.value.get(prefkey)
2624 2632
2625 2633 # Special case for b missing attribute and a matches exactly.
2626 2634 if avalue is not None and bvalue is None and avalue == prefvalue:
2627 2635 return -1
2628 2636
2629 2637 # Special case for a missing attribute and b matches exactly.
2630 2638 if bvalue is not None and avalue is None and bvalue == prefvalue:
2631 2639 return 1
2632 2640
2633 2641 # We can't compare unless attribute present on both.
2634 2642 if avalue is None or bvalue is None:
2635 2643 continue
2636 2644
2637 2645 # Same values should fall back to next attribute.
2638 2646 if avalue == bvalue:
2639 2647 continue
2640 2648
2641 2649 # Exact matches come first.
2642 2650 if avalue == prefvalue:
2643 2651 return -1
2644 2652 if bvalue == prefvalue:
2645 2653 return 1
2646 2654
2647 2655 # Fall back to next attribute.
2648 2656 continue
2649 2657
2650 2658 # If we got here we couldn't sort by attributes and prefers. Fall
2651 2659 # back to index order.
2652 2660 return 0
2653 2661
2654 2662 def __lt__(self, other):
2655 2663 return self._cmp(other) < 0
2656 2664
2657 2665 def __gt__(self, other):
2658 2666 return self._cmp(other) > 0
2659 2667
2660 2668 def __eq__(self, other):
2661 2669 return self._cmp(other) == 0
2662 2670
2663 2671 def __le__(self, other):
2664 2672 return self._cmp(other) <= 0
2665 2673
2666 2674 def __ge__(self, other):
2667 2675 return self._cmp(other) >= 0
2668 2676
2669 2677 def __ne__(self, other):
2670 2678 return self._cmp(other) != 0
2671 2679
2672 2680 def sortclonebundleentries(ui, entries):
2673 2681 prefers = ui.configlist('ui', 'clonebundleprefers')
2674 2682 if not prefers:
2675 2683 return list(entries)
2676 2684
2677 2685 prefers = [p.split('=', 1) for p in prefers]
2678 2686
2679 2687 items = sorted(clonebundleentry(v, prefers) for v in entries)
2680 2688 return [i.value for i in items]
2681 2689
2682 2690 def trypullbundlefromurl(ui, repo, url):
2683 2691 """Attempt to apply a bundle from a URL."""
2684 2692 with repo.lock(), repo.transaction('bundleurl') as tr:
2685 2693 try:
2686 2694 fh = urlmod.open(ui, url)
2687 2695 cg = readbundle(ui, fh, 'stream')
2688 2696
2689 2697 if isinstance(cg, streamclone.streamcloneapplier):
2690 2698 cg.apply(repo)
2691 2699 else:
2692 2700 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2693 2701 return True
2694 2702 except urlerr.httperror as e:
2695 2703 ui.warn(_('HTTP error fetching bundle: %s\n') %
2696 2704 stringutil.forcebytestr(e))
2697 2705 except urlerr.urlerror as e:
2698 2706 ui.warn(_('error fetching bundle: %s\n') %
2699 2707 stringutil.forcebytestr(e.reason))
2700 2708
2701 2709 return False
@@ -1,1382 +1,1352 b''
1 1 #testcases b2-pushkey b2-binary
2 2
3 3 #if b2-pushkey
4 4 $ cat << EOF >> $HGRCPATH
5 5 > [devel]
6 6 > legacy.exchange=bookmarks
7 7 > EOF
8 8 #endif
9 9
10 10 #require serve
11 11
12 12 $ cat << EOF >> $HGRCPATH
13 13 > [ui]
14 14 > logtemplate={rev}:{node|short} {desc|firstline}
15 15 > [phases]
16 16 > publish=False
17 17 > [experimental]
18 18 > evolution.createmarkers=True
19 19 > evolution.exchange=True
20 20 > EOF
21 21
22 22 $ cat > $TESTTMP/hook.sh <<'EOF'
23 23 > echo "test-hook-bookmark: $HG_BOOKMARK: $HG_OLDNODE -> $HG_NODE"
24 24 > EOF
25 25 $ TESTHOOK="hooks.txnclose-bookmark.test=sh $TESTTMP/hook.sh"
26 26
27 27 initialize
28 28
29 29 $ hg init a
30 30 $ cd a
31 31 $ echo 'test' > test
32 32 $ hg commit -Am'test'
33 33 adding test
34 34
35 35 set bookmarks
36 36
37 37 $ hg bookmark X
38 38 $ hg bookmark Y
39 39 $ hg bookmark Z
40 40
41 41 import bookmark by name
42 42
43 43 $ hg init ../b
44 44 $ cd ../b
45 45 $ hg book Y
46 46 $ hg book
47 47 * Y -1:000000000000
48 48 $ hg pull ../a --config "$TESTHOOK"
49 49 pulling from ../a
50 50 requesting all changes
51 51 adding changesets
52 52 adding manifests
53 53 adding file changes
54 54 added 1 changesets with 1 changes to 1 files
55 55 adding remote bookmark X
56 56 updating bookmark Y
57 57 adding remote bookmark Z
58 58 new changesets 4e3505fd9583 (1 drafts)
59 59 test-hook-bookmark: X: -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
60 60 test-hook-bookmark: Y: 0000000000000000000000000000000000000000 -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
61 61 test-hook-bookmark: Z: -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
62 62 (run 'hg update' to get a working copy)
63 63 $ hg bookmarks
64 64 X 0:4e3505fd9583
65 65 * Y 0:4e3505fd9583
66 66 Z 0:4e3505fd9583
67 67 $ hg debugpushkey ../a namespaces
68 68 bookmarks
69 69 namespaces
70 70 obsolete
71 71 phases
72 72 $ hg debugpushkey ../a bookmarks
73 73 X 4e3505fd95835d721066b76e75dbb8cc554d7f77
74 74 Y 4e3505fd95835d721066b76e75dbb8cc554d7f77
75 75 Z 4e3505fd95835d721066b76e75dbb8cc554d7f77
76 76
77 77 delete the bookmark to re-pull it
78 78
79 79 $ hg book -d X
80 80 $ hg pull -B X ../a
81 81 pulling from ../a
82 82 no changes found
83 83 adding remote bookmark X
84 84
85 85 finally no-op pull
86 86
87 87 $ hg pull -B X ../a
88 88 pulling from ../a
89 89 no changes found
90 90 $ hg bookmark
91 91 X 0:4e3505fd9583
92 92 * Y 0:4e3505fd9583
93 93 Z 0:4e3505fd9583
94 94
95 95 export bookmark by name
96 96
97 97 $ hg bookmark W
98 98 $ hg bookmark foo
99 99 $ hg bookmark foobar
100 100 $ hg push -B W ../a
101 101 pushing to ../a
102 102 searching for changes
103 103 no changes found
104 104 exporting bookmark W
105 105 [1]
106 106 $ hg -R ../a bookmarks
107 107 W -1:000000000000
108 108 X 0:4e3505fd9583
109 109 Y 0:4e3505fd9583
110 110 * Z 0:4e3505fd9583
111 111
112 112 delete a remote bookmark
113 113
114 114 $ hg book -d W
115 115
116 116 #if b2-pushkey
117 117
118 118 $ hg push -B W ../a --config "$TESTHOOK" --debug --config devel.bundle2.debug=yes
119 119 pushing to ../a
120 120 query 1; heads
121 121 searching for changes
122 122 all remote heads known locally
123 123 listing keys for "phases"
124 124 checking for updated bookmarks
125 125 listing keys for "bookmarks"
126 126 no changes found
127 127 bundle2-output-bundle: "HG20", 4 parts total
128 128 bundle2-output: start emission of HG20 stream
129 129 bundle2-output: bundle parameter:
130 130 bundle2-output: start of parts
131 131 bundle2-output: bundle part: "replycaps"
132 132 bundle2-output-part: "replycaps" 222 bytes payload
133 133 bundle2-output: part 0: "REPLYCAPS"
134 134 bundle2-output: header chunk size: 16
135 135 bundle2-output: payload chunk size: 222
136 136 bundle2-output: closing payload chunk
137 137 bundle2-output: bundle part: "check:bookmarks"
138 138 bundle2-output-part: "check:bookmarks" 23 bytes payload
139 139 bundle2-output: part 1: "CHECK:BOOKMARKS"
140 140 bundle2-output: header chunk size: 22
141 141 bundle2-output: payload chunk size: 23
142 142 bundle2-output: closing payload chunk
143 143 bundle2-output: bundle part: "check:phases"
144 144 bundle2-output-part: "check:phases" 24 bytes payload
145 145 bundle2-output: part 2: "CHECK:PHASES"
146 146 bundle2-output: header chunk size: 19
147 147 bundle2-output: payload chunk size: 24
148 148 bundle2-output: closing payload chunk
149 149 bundle2-output: bundle part: "pushkey"
150 150 bundle2-output-part: "pushkey" (params: 4 mandatory) empty payload
151 151 bundle2-output: part 3: "PUSHKEY"
152 152 bundle2-output: header chunk size: 90
153 153 bundle2-output: closing payload chunk
154 154 bundle2-output: end of bundle
155 155 bundle2-input: start processing of HG20 stream
156 156 bundle2-input: reading bundle2 stream parameters
157 157 bundle2-input-bundle: with-transaction
158 158 bundle2-input: start extraction of bundle2 parts
159 159 bundle2-input: part header size: 16
160 160 bundle2-input: part type: "REPLYCAPS"
161 161 bundle2-input: part id: "0"
162 162 bundle2-input: part parameters: 0
163 163 bundle2-input: found a handler for part replycaps
164 164 bundle2-input-part: "replycaps" supported
165 165 bundle2-input: payload chunk size: 222
166 166 bundle2-input: payload chunk size: 0
167 167 bundle2-input-part: total payload size 222
168 168 bundle2-input: part header size: 22
169 169 bundle2-input: part type: "CHECK:BOOKMARKS"
170 170 bundle2-input: part id: "1"
171 171 bundle2-input: part parameters: 0
172 172 bundle2-input: found a handler for part check:bookmarks
173 173 bundle2-input-part: "check:bookmarks" supported
174 174 bundle2-input: payload chunk size: 23
175 175 bundle2-input: payload chunk size: 0
176 176 bundle2-input-part: total payload size 23
177 177 bundle2-input: part header size: 19
178 178 bundle2-input: part type: "CHECK:PHASES"
179 179 bundle2-input: part id: "2"
180 180 bundle2-input: part parameters: 0
181 181 bundle2-input: found a handler for part check:phases
182 182 bundle2-input-part: "check:phases" supported
183 183 bundle2-input: payload chunk size: 24
184 184 bundle2-input: payload chunk size: 0
185 185 bundle2-input-part: total payload size 24
186 186 bundle2-input: part header size: 90
187 187 bundle2-input: part type: "PUSHKEY"
188 188 bundle2-input: part id: "3"
189 189 bundle2-input: part parameters: 4
190 190 bundle2-input: found a handler for part pushkey
191 191 bundle2-input-part: "pushkey" (params: 4 mandatory) supported
192 192 pushing key for "bookmarks:W"
193 193 bundle2-input: payload chunk size: 0
194 194 bundle2-input: part header size: 0
195 195 bundle2-input: end of bundle2 stream
196 196 bundle2-input-bundle: 3 parts total
197 197 running hook txnclose-bookmark.test: sh $TESTTMP/hook.sh
198 198 test-hook-bookmark: W: 0000000000000000000000000000000000000000 ->
199 199 bundle2-output-bundle: "HG20", 1 parts total
200 200 bundle2-output: start emission of HG20 stream
201 201 bundle2-output: bundle parameter:
202 202 bundle2-output: start of parts
203 203 bundle2-output: bundle part: "reply:pushkey"
204 204 bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
205 205 bundle2-output: part 0: "REPLY:PUSHKEY"
206 206 bundle2-output: header chunk size: 43
207 207 bundle2-output: closing payload chunk
208 208 bundle2-output: end of bundle
209 209 bundle2-input: start processing of HG20 stream
210 210 bundle2-input: reading bundle2 stream parameters
211 211 bundle2-input-bundle: no-transaction
212 212 bundle2-input: start extraction of bundle2 parts
213 213 bundle2-input: part header size: 43
214 214 bundle2-input: part type: "REPLY:PUSHKEY"
215 215 bundle2-input: part id: "0"
216 216 bundle2-input: part parameters: 2
217 217 bundle2-input: found a handler for part reply:pushkey
218 218 bundle2-input-part: "reply:pushkey" (params: 0 advisory) supported
219 219 bundle2-input: payload chunk size: 0
220 220 bundle2-input: part header size: 0
221 221 bundle2-input: end of bundle2 stream
222 222 bundle2-input-bundle: 0 parts total
223 223 deleting remote bookmark W
224 224 listing keys for "phases"
225 225 [1]
226 226
227 227 #endif
228 228 #if b2-binary
229 229
230 230 $ hg push -B W ../a --config "$TESTHOOK" --debug --config devel.bundle2.debug=yes
231 231 pushing to ../a
232 232 query 1; heads
233 233 searching for changes
234 234 all remote heads known locally
235 235 listing keys for "phases"
236 236 checking for updated bookmarks
237 237 listing keys for "bookmarks"
238 238 no changes found
239 239 bundle2-output-bundle: "HG20", 4 parts total
240 240 bundle2-output: start emission of HG20 stream
241 241 bundle2-output: bundle parameter:
242 242 bundle2-output: start of parts
243 243 bundle2-output: bundle part: "replycaps"
244 244 bundle2-output-part: "replycaps" 222 bytes payload
245 245 bundle2-output: part 0: "REPLYCAPS"
246 246 bundle2-output: header chunk size: 16
247 247 bundle2-output: payload chunk size: 222
248 248 bundle2-output: closing payload chunk
249 249 bundle2-output: bundle part: "check:bookmarks"
250 250 bundle2-output-part: "check:bookmarks" 23 bytes payload
251 251 bundle2-output: part 1: "CHECK:BOOKMARKS"
252 252 bundle2-output: header chunk size: 22
253 253 bundle2-output: payload chunk size: 23
254 254 bundle2-output: closing payload chunk
255 255 bundle2-output: bundle part: "check:phases"
256 256 bundle2-output-part: "check:phases" 24 bytes payload
257 257 bundle2-output: part 2: "CHECK:PHASES"
258 258 bundle2-output: header chunk size: 19
259 259 bundle2-output: payload chunk size: 24
260 260 bundle2-output: closing payload chunk
261 261 bundle2-output: bundle part: "bookmarks"
262 262 bundle2-output-part: "bookmarks" 23 bytes payload
263 263 bundle2-output: part 3: "BOOKMARKS"
264 264 bundle2-output: header chunk size: 16
265 265 bundle2-output: payload chunk size: 23
266 266 bundle2-output: closing payload chunk
267 267 bundle2-output: end of bundle
268 268 bundle2-input: start processing of HG20 stream
269 269 bundle2-input: reading bundle2 stream parameters
270 270 bundle2-input-bundle: with-transaction
271 271 bundle2-input: start extraction of bundle2 parts
272 272 bundle2-input: part header size: 16
273 273 bundle2-input: part type: "REPLYCAPS"
274 274 bundle2-input: part id: "0"
275 275 bundle2-input: part parameters: 0
276 276 bundle2-input: found a handler for part replycaps
277 277 bundle2-input-part: "replycaps" supported
278 278 bundle2-input: payload chunk size: 222
279 279 bundle2-input: payload chunk size: 0
280 280 bundle2-input-part: total payload size 222
281 281 bundle2-input: part header size: 22
282 282 bundle2-input: part type: "CHECK:BOOKMARKS"
283 283 bundle2-input: part id: "1"
284 284 bundle2-input: part parameters: 0
285 285 bundle2-input: found a handler for part check:bookmarks
286 286 bundle2-input-part: "check:bookmarks" supported
287 287 bundle2-input: payload chunk size: 23
288 288 bundle2-input: payload chunk size: 0
289 289 bundle2-input-part: total payload size 23
290 290 bundle2-input: part header size: 19
291 291 bundle2-input: part type: "CHECK:PHASES"
292 292 bundle2-input: part id: "2"
293 293 bundle2-input: part parameters: 0
294 294 bundle2-input: found a handler for part check:phases
295 295 bundle2-input-part: "check:phases" supported
296 296 bundle2-input: payload chunk size: 24
297 297 bundle2-input: payload chunk size: 0
298 298 bundle2-input-part: total payload size 24
299 299 bundle2-input: part header size: 16
300 300 bundle2-input: part type: "BOOKMARKS"
301 301 bundle2-input: part id: "3"
302 302 bundle2-input: part parameters: 0
303 303 bundle2-input: found a handler for part bookmarks
304 304 bundle2-input-part: "bookmarks" supported
305 305 bundle2-input: payload chunk size: 23
306 306 bundle2-input: payload chunk size: 0
307 307 bundle2-input-part: total payload size 23
308 308 bundle2-input: part header size: 0
309 309 bundle2-input: end of bundle2 stream
310 310 bundle2-input-bundle: 3 parts total
311 311 running hook txnclose-bookmark.test: sh $TESTTMP/hook.sh
312 312 test-hook-bookmark: W: 0000000000000000000000000000000000000000 ->
313 313 bundle2-output-bundle: "HG20", 0 parts total
314 314 bundle2-output: start emission of HG20 stream
315 315 bundle2-output: bundle parameter:
316 316 bundle2-output: start of parts
317 317 bundle2-output: end of bundle
318 318 bundle2-input: start processing of HG20 stream
319 319 bundle2-input: reading bundle2 stream parameters
320 320 bundle2-input-bundle: no-transaction
321 321 bundle2-input: start extraction of bundle2 parts
322 322 bundle2-input: part header size: 0
323 323 bundle2-input: end of bundle2 stream
324 324 bundle2-input-bundle: 0 parts total
325 325 deleting remote bookmark W
326 326 listing keys for "phases"
327 327 [1]
328 328
329 329 #endif
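
For context, the two transcripts above encode the same deletion differently:
the b2-pushkey variant sends a "pushkey" part whose new value is empty, while
the b2-binary variant sends a binary "bookmarks" part carrying the updated
bookmark set. A rough sketch of the pushkey encoding (illustrative only, not
Mercurial's actual code):

    # hypothetical sketch: how a bookmark deletion travels over pushkey
    def encode_bookmark_deletion(name, old_node_hex):
        return {
            'namespace': 'bookmarks',
            'key': name,           # e.g. 'W'
            'old': old_node_hex,   # the current remote value
            'new': '',             # an empty new value requests deletion
        }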
330 330
331 331 export the active bookmark
332 332
333 333 $ hg bookmark V
334 334 $ hg push -B . ../a
335 335 pushing to ../a
336 336 searching for changes
337 337 no changes found
338 338 exporting bookmark V
339 339 [1]
340 340
341 341 exporting the active bookmark with 'push -B .'
342 342 requires that a bookmark be active
343 343
344 344 $ hg update -r default
345 345 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
346 346 (leaving bookmark V)
347 347 $ hg push -B . ../a
348 348 abort: no active bookmark!
349 349 [255]
350 350 $ hg update -r V
351 351 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
352 352 (activating bookmark V)
353 353
354 354 delete the bookmark
355 355
356 356 $ hg book -d V
357 357 $ hg push -B V ../a
358 358 pushing to ../a
359 359 searching for changes
360 360 no changes found
361 361 deleting remote bookmark V
362 362 [1]
363 363 $ hg up foobar
364 364 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
365 365 (activating bookmark foobar)
366 366
367 367 push/pull a bookmark name that doesn't exist
368 368
369 369 $ hg push -B badname ../a
370 370 pushing to ../a
371 371 searching for changes
372 372 bookmark badname does not exist on the local or remote repository!
373 373 no changes found
374 374 [2]
375 375 $ hg pull -B anotherbadname ../a
376 376 pulling from ../a
377 377 abort: remote bookmark anotherbadname not found!
378 378 [255]
379 379
380 380 divergent bookmarks
381 381
382 382 $ cd ../a
383 383 $ echo c1 > f1
384 384 $ hg ci -Am1
385 385 adding f1
386 386 $ hg book -f @
387 387 $ hg book -f X
388 388 $ hg book
389 389 @ 1:0d2164f0ce0d
390 390 * X 1:0d2164f0ce0d
391 391 Y 0:4e3505fd9583
392 392 Z 1:0d2164f0ce0d
393 393
394 394 $ cd ../b
395 395 $ hg up
396 396 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
397 397 updating bookmark foobar
398 398 $ echo c2 > f2
399 399 $ hg ci -Am2
400 400 adding f2
401 401 $ hg book -if @
402 402 $ hg book -if X
403 403 $ hg book
404 404 @ 1:9b140be10808
405 405 X 1:9b140be10808
406 406 Y 0:4e3505fd9583
407 407 Z 0:4e3505fd9583
408 408 foo -1:000000000000
409 409 * foobar 1:9b140be10808
410 410
411 411 $ hg pull --config paths.foo=../a foo --config "$TESTHOOK"
412 412 pulling from $TESTTMP/a
413 413 searching for changes
414 414 adding changesets
415 415 adding manifests
416 416 adding file changes
417 417 added 1 changesets with 1 changes to 1 files (+1 heads)
418 418 divergent bookmark @ stored as @foo
419 419 divergent bookmark X stored as X@foo
420 420 updating bookmark Z
421 421 new changesets 0d2164f0ce0d (1 drafts)
422 422 test-hook-bookmark: @foo: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
423 423 test-hook-bookmark: X@foo: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
424 424 test-hook-bookmark: Z: 4e3505fd95835d721066b76e75dbb8cc554d7f77 -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
425 425 (run 'hg heads' to see heads, 'hg merge' to merge)
426 426 $ hg book
427 427 @ 1:9b140be10808
428 428 @foo 2:0d2164f0ce0d
429 429 X 1:9b140be10808
430 430 X@foo 2:0d2164f0ce0d
431 431 Y 0:4e3505fd9583
432 432 Z 2:0d2164f0ce0d
433 433 foo -1:000000000000
434 434 * foobar 1:9b140be10808
435 435
436 436 (test the behavior when there are too many divergent bookmarks)
437 437
438 438 $ "$PYTHON" $TESTDIR/seq.py 1 100 | while read i; do hg bookmarks -r 000000000000 "X@${i}"; done
439 439 $ hg pull ../a
440 440 pulling from ../a
441 441 searching for changes
442 442 no changes found
443 443 warning: failed to assign numbered name to divergent bookmark X
444 444 divergent bookmark @ stored as @1
445 445 $ hg bookmarks | grep '^ X' | grep -v ':000000000000'
446 446 X 1:9b140be10808
447 447 X@foo 2:0d2164f0ce0d
448 448
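The '@N' fallback naming seen above behaves roughly like this sketch
(simplified, assuming a fixed upper bound of 100 tries; not Mercurial's
actual implementation):

    # hypothetical sketch of the numbered-name fallback for divergent bookmarks
    def assign_divergent_name(existing_names, name, maxtries=100):
        for i in range(1, maxtries + 1):
            candidate = '%s@%d' % (name, i)
            if candidate not in existing_names:
                return candidate
        return None  # caller warns: failed to assign numbered name
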
449 449 (test that divergent bookmark names are reused if the remote value hasn't changed)
450 450
451 451 $ hg bookmarks | grep '^ @'
452 452 @ 1:9b140be10808
453 453 @1 2:0d2164f0ce0d
454 454 @foo 2:0d2164f0ce0d
455 455 $ hg pull ../a
456 456 pulling from ../a
457 457 searching for changes
458 458 no changes found
459 459 warning: failed to assign numbered name to divergent bookmark X
460 460 divergent bookmark @ stored as @1
461 461 $ hg bookmarks | grep '^ @'
462 462 @ 1:9b140be10808
463 463 @1 2:0d2164f0ce0d
464 464 @foo 2:0d2164f0ce0d
465 465
466 466 $ "$PYTHON" $TESTDIR/seq.py 1 100 | while read i; do hg bookmarks -d "X@${i}"; done
467 467 $ hg bookmarks -d "@1"
468 468
469 469 $ hg push -f ../a
470 470 pushing to ../a
471 471 searching for changes
472 472 adding changesets
473 473 adding manifests
474 474 adding file changes
475 475 added 1 changesets with 1 changes to 1 files (+1 heads)
476 476 $ hg -R ../a book
477 477 @ 1:0d2164f0ce0d
478 478 * X 1:0d2164f0ce0d
479 479 Y 0:4e3505fd9583
480 480 Z 1:0d2164f0ce0d
481 481
482 482 explicit pull should overwrite the local version (issue4439)
483 483
484 484 $ hg update -r X
485 485 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
486 486 (activating bookmark X)
487 487 $ hg pull --config paths.foo=../a foo -B . --config "$TESTHOOK"
488 488 pulling from $TESTTMP/a
489 489 no changes found
490 490 divergent bookmark @ stored as @foo
491 491 importing bookmark X
492 492 test-hook-bookmark: @foo: 0d2164f0ce0d8f1d6f94351eba04b794909be66c -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
493 493 test-hook-bookmark: X: 9b140be1080824d768c5a4691a564088eede71f9 -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
494 494
495 495 restore state for further testing:
496 496
497 497 $ hg book -fr 9b140be10808 X
498 498
499 499 revsets should not ignore divergent bookmarks
500 500
501 501 $ hg bookmark -fr 1 Z
502 502 $ hg log -r 'bookmark()' --template '{rev}:{node|short} {bookmarks}\n'
503 503 0:4e3505fd9583 Y
504 504 1:9b140be10808 @ X Z foobar
505 505 2:0d2164f0ce0d @foo X@foo
506 506 $ hg log -r 'bookmark("X@foo")' --template '{rev}:{node|short} {bookmarks}\n'
507 507 2:0d2164f0ce0d @foo X@foo
508 508 $ hg log -r 'bookmark("re:X@foo")' --template '{rev}:{node|short} {bookmarks}\n'
509 509 2:0d2164f0ce0d @foo X@foo
510 510
511 511 update a remote bookmark from a non-head to a head
512 512
513 513 $ hg up -q Y
514 514 $ echo c3 > f2
515 515 $ hg ci -Am3
516 516 adding f2
517 517 created new head
518 518 $ hg push ../a --config "$TESTHOOK"
519 519 pushing to ../a
520 520 searching for changes
521 521 adding changesets
522 522 adding manifests
523 523 adding file changes
524 524 added 1 changesets with 1 changes to 1 files (+1 heads)
525 525 test-hook-bookmark: Y: 4e3505fd95835d721066b76e75dbb8cc554d7f77 -> f6fc62dde3c0771e29704af56ba4d8af77abcc2f
526 526 updating bookmark Y
527 527 $ hg -R ../a book
528 528 @ 1:0d2164f0ce0d
529 529 * X 1:0d2164f0ce0d
530 530 Y 3:f6fc62dde3c0
531 531 Z 1:0d2164f0ce0d
532 532
533 533 update a bookmark while a client is pulling changes
534 534
535 535 $ cd ..
536 536 $ hg clone -q a pull-race
537 537
538 538 We want to use http because it is stateless and therefore more susceptible to
539 539 race conditions
540 540
541 541 $ hg serve -R pull-race -p $HGPORT -d --pid-file=pull-race.pid -E main-error.log
542 542 $ cat pull-race.pid >> $DAEMON_PIDS
543 543
544 544 $ cat <<EOF > $TESTTMP/out_makecommit.sh
545 545 > #!/bin/sh
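> # server-side 'outgoing' hook: commits during the client's pull so the server state changes mid-exchange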
546 546 > hg ci -Am5
547 547 > echo committed in pull-race
548 548 > EOF
549 549
550 550 $ hg clone -q http://localhost:$HGPORT/ pull-race2 --config "$TESTHOOK"
551 551 test-hook-bookmark: @: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
552 552 test-hook-bookmark: X: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
553 553 test-hook-bookmark: Y: -> f6fc62dde3c0771e29704af56ba4d8af77abcc2f
554 554 test-hook-bookmark: Z: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
555 555 $ cd pull-race
556 556 $ hg up -q Y
557 557 $ echo c4 > f2
558 558 $ hg ci -Am4
559 559 $ echo c5 > f3
560 560 $ cat <<EOF > .hg/hgrc
561 561 > [hooks]
562 562 > outgoing.makecommit = sh $TESTTMP/out_makecommit.sh
563 563 > EOF
564 564
565 565 (new config needs a server restart)
566 566
567 567 $ cd ..
568 568 $ killdaemons.py
569 569 $ hg serve -R pull-race -p $HGPORT -d --pid-file=pull-race.pid -E main-error.log
570 570 $ cat pull-race.pid >> $DAEMON_PIDS
571 571 $ cd pull-race2
572 572 $ hg -R $TESTTMP/pull-race book
573 573 @ 1:0d2164f0ce0d
574 574 X 1:0d2164f0ce0d
575 575 * Y 4:b0a5eff05604
576 576 Z 1:0d2164f0ce0d
577 577 $ hg pull
578 578 pulling from http://localhost:$HGPORT/
579 579 searching for changes
580 580 adding changesets
581 581 adding manifests
582 582 adding file changes
583 583 added 1 changesets with 1 changes to 1 files
584 584 updating bookmark Y
585 585 new changesets b0a5eff05604 (1 drafts)
586 586 (run 'hg update' to get a working copy)
587 587 $ hg book
588 588 * @ 1:0d2164f0ce0d
589 589 X 1:0d2164f0ce0d
590 590 Y 4:b0a5eff05604
591 591 Z 1:0d2164f0ce0d
592 592
593 593 Update a bookmark right after the initial lookup done by 'pull -B' (issue4689)
594 594
595 595 $ echo c6 > ../pull-race/f3 # to be committed during the race
596 596 $ cat <<EOF > $TESTTMP/listkeys_makecommit.sh
597 597 > #!/bin/sh
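> # commit only if the working copy is modified, so later hook invocations are no-ops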
598 598 > if hg st | grep -q M; then
599 599 > hg commit -m race
600 600 > echo committed in pull-race
601 601 > else
602 602 > exit 0
603 603 > fi
604 604 > EOF
605 605 $ cat <<EOF > ../pull-race/.hg/hgrc
606 606 > [hooks]
607 607 > # If anything to commit, commit it right after the first key listing used
608 608 > # during lookup. This makes the commit appear before the actual getbundle
609 609 > # call.
610 610 > listkeys.makecommit= sh $TESTTMP/listkeys_makecommit.sh
611 611 > EOF
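
The race set up here depends on the ordering of wire calls during
'hg pull -B .'; roughly (a simplified view based on the hook comment above,
not a literal protocol trace):

    # 1. listkeys('bookmarks')  -> hook fires; the server commits and Y advances
    # 2. getbundle(...)         -> must still return data consistent with step 1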
612 612 $ restart_server() {
613 613 > "$TESTDIR/killdaemons.py" $DAEMON_PIDS
614 614 > hg serve -R ../pull-race -p $HGPORT -d --pid-file=../pull-race.pid -E main-error.log
615 615 > cat ../pull-race.pid >> $DAEMON_PIDS
616 616 > }
617 617 $ restart_server # new config needs a server restart
618 618 $ hg -R $TESTTMP/pull-race book
619 619 @ 1:0d2164f0ce0d
620 620 X 1:0d2164f0ce0d
621 621 * Y 5:35d1ef0a8d1b
622 622 Z 1:0d2164f0ce0d
623 623 $ hg update -r Y
624 624 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
625 625 (activating bookmark Y)
626 626 $ hg pull -B .
627 627 pulling from http://localhost:$HGPORT/
628 628 searching for changes
629 629 adding changesets
630 630 adding manifests
631 631 adding file changes
632 632 added 1 changesets with 1 changes to 1 files
633 633 updating bookmark Y
634 634 new changesets 35d1ef0a8d1b (1 drafts)
635 635 (run 'hg update' to get a working copy)
636 636 $ hg book
637 637 @ 1:0d2164f0ce0d
638 638 X 1:0d2164f0ce0d
639 639 * Y 5:35d1ef0a8d1b
640 640 Z 1:0d2164f0ce0d
641 641
642 642 Update a bookmark right after the initial lookup done by 'pull -r' (issue4700)
643 643
644 644 $ echo c7 > ../pull-race/f3 # to be committed during the race
645 645 $ cat <<EOF > ../lookuphook.py
646 646 > """small extension adding a hook after wireprotocol lookup to test races"""
647 647 > import functools
648 648 > from mercurial import wireprotov1server, wireprotov2server
649 649 >
650 650 > def wrappedlookup(orig, repo, *args, **kwargs):
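> # call the original lookup, then fire a 'lookup' hook the test uses to race a commit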
651 651 > ret = orig(repo, *args, **kwargs)
652 652 > repo.hook(b'lookup')
653 653 > return ret
654 654 > for table in [wireprotov1server.commands, wireprotov2server.COMMANDS]:
655 655 > table[b'lookup'].func = functools.partial(wrappedlookup, table[b'lookup'].func)
656 656 > EOF
657 657 $ cat <<EOF > ../pull-race/.hg/hgrc
658 658 > [extensions]
659 659 > lookuphook=$TESTTMP/lookuphook.py
660 660 > [hooks]
661 661 > lookup.makecommit= sh $TESTTMP/listkeys_makecommit.sh
662 662 > EOF
663 663 $ restart_server # new config needs a server restart
664 664 $ hg -R $TESTTMP/pull-race book
665 665 @ 1:0d2164f0ce0d
666 666 X 1:0d2164f0ce0d
667 667 * Y 6:0d60821d2197
668 668 Z 1:0d2164f0ce0d
669 669 $ hg pull -r Y
670 670 pulling from http://localhost:$HGPORT/
671 671 searching for changes
672 672 adding changesets
673 673 adding manifests
674 674 adding file changes
675 675 added 1 changesets with 1 changes to 1 files
676 676 updating bookmark Y
677 677 new changesets 0d60821d2197 (1 drafts)
678 678 (run 'hg update' to get a working copy)
679 679 $ hg book
680 680 @ 1:0d2164f0ce0d
681 681 X 1:0d2164f0ce0d
682 682 * Y 6:0d60821d2197
683 683 Z 1:0d2164f0ce0d
684 684 $ hg -R $TESTTMP/pull-race book
685 685 @ 1:0d2164f0ce0d
686 686 X 1:0d2164f0ce0d
687 687 * Y 7:714424d9e8b8
688 688 Z 1:0d2164f0ce0d
689 689
690 690 (done with this section of the test)
691 691
692 692 $ killdaemons.py
693 693 $ cd ../b
694 694
695 695 diverging a remote bookmark fails
696 696
697 697 $ hg up -q 4e3505fd9583
698 698 $ echo c4 > f2
699 699 $ hg ci -Am4
700 700 adding f2
701 701 created new head
702 702 $ echo c5 > f2
703 703 $ hg ci -Am5
704 704 $ hg log -G
705 705 @ 5:c922c0139ca0 5
706 706 |
707 707 o 4:4efff6d98829 4
708 708 |
709 709 | o 3:f6fc62dde3c0 3
710 710 |/
711 711 | o 2:0d2164f0ce0d 1
712 712 |/
713 713 | o 1:9b140be10808 2
714 714 |/
715 715 o 0:4e3505fd9583 test
716 716
717 717
718 718 $ hg book -f Y
719 719
720 720 $ cat <<EOF > ../a/.hg/hgrc
721 721 > [web]
722 722 > push_ssl = false
723 723 > allow_push = *
724 724 > EOF
725 725
726 726 $ hg serve -R ../a -p $HGPORT2 -d --pid-file=../hg2.pid
727 727 $ cat ../hg2.pid >> $DAEMON_PIDS
728 728
729 729 $ hg push http://localhost:$HGPORT2/
730 730 pushing to http://localhost:$HGPORT2/
731 731 searching for changes
732 732 abort: push creates new remote head c922c0139ca0 with bookmark 'Y'!
733 733 (merge or see 'hg help push' for details about pushing new heads)
734 734 [255]
735 735 $ hg -R ../a book
736 736 @ 1:0d2164f0ce0d
737 737 * X 1:0d2164f0ce0d
738 738 Y 3:f6fc62dde3c0
739 739 Z 1:0d2164f0ce0d
740 740
741 741
742 742 An unrelated obsolescence marker does not alter the decision
743 743
744 744 $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
745 745 $ hg push http://localhost:$HGPORT2/
746 746 pushing to http://localhost:$HGPORT2/
747 747 searching for changes
748 748 abort: push creates new remote head c922c0139ca0 with bookmark 'Y'!
749 749 (merge or see 'hg help push' for details about pushing new heads)
750 750 [255]
751 751 $ hg -R ../a book
752 752 @ 1:0d2164f0ce0d
753 753 * X 1:0d2164f0ce0d
754 754 Y 3:f6fc62dde3c0
755 755 Z 1:0d2164f0ce0d
756 756
757 757 Update to a successor works
758 758
759 759 $ hg id --debug -r 3
760 760 f6fc62dde3c0771e29704af56ba4d8af77abcc2f
761 761 $ hg id --debug -r 4
762 762 4efff6d98829d9c824c621afd6e3f01865f5439f
763 763 $ hg id --debug -r 5
764 764 c922c0139ca03858f655e4a2af4dd02796a63969 tip Y
765 765 $ hg debugobsolete f6fc62dde3c0771e29704af56ba4d8af77abcc2f cccccccccccccccccccccccccccccccccccccccc
766 766 obsoleted 1 changesets
767 767 $ hg debugobsolete cccccccccccccccccccccccccccccccccccccccc 4efff6d98829d9c824c621afd6e3f01865f5439f
768 768 $ hg push http://localhost:$HGPORT2/
769 769 pushing to http://localhost:$HGPORT2/
770 770 searching for changes
771 771 remote: adding changesets
772 772 remote: adding manifests
773 773 remote: adding file changes
774 774 remote: added 2 changesets with 2 changes to 1 files (+1 heads)
775 775 remote: 2 new obsolescence markers
776 776 remote: obsoleted 1 changesets
777 777 updating bookmark Y
778 778 $ hg -R ../a book
779 779 @ 1:0d2164f0ce0d
780 780 * X 1:0d2164f0ce0d
781 781 Y 5:c922c0139ca0
782 782 Z 1:0d2164f0ce0d
783 783
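The push above succeeds because the two markers chain the old bookmarked head
to the pushed head's parent, so the server can treat c922c0139ca0 as a
successor rather than an unrelated new head:

    f6fc62dde3c0 --marker--> cccccccccccc --marker--> 4efff6d98829 (parent of c922c0139ca0)
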
784 784 hgweb
785 785
786 786 $ cat <<EOF > .hg/hgrc
787 787 > [web]
788 788 > push_ssl = false
789 789 > allow_push = *
790 790 > EOF
791 791
792 792 $ hg serve -p $HGPORT -d --pid-file=../hg.pid -E errors.log
793 793 $ cat ../hg.pid >> $DAEMON_PIDS
794 794 $ cd ../a
795 795
796 796 $ hg debugpushkey http://localhost:$HGPORT/ namespaces
797 797 bookmarks
798 798 namespaces
799 799 obsolete
800 800 phases
801 801 $ hg debugpushkey http://localhost:$HGPORT/ bookmarks
802 802 @ 9b140be1080824d768c5a4691a564088eede71f9
803 803 X 9b140be1080824d768c5a4691a564088eede71f9
804 804 Y c922c0139ca03858f655e4a2af4dd02796a63969
805 805 Z 9b140be1080824d768c5a4691a564088eede71f9
806 806 foo 0000000000000000000000000000000000000000
807 807 foobar 9b140be1080824d768c5a4691a564088eede71f9
808 808 $ hg out -B http://localhost:$HGPORT/
809 809 comparing with http://localhost:$HGPORT/
810 810 searching for changed bookmarks
811 811 @ 0d2164f0ce0d
812 812 X 0d2164f0ce0d
813 813 Z 0d2164f0ce0d
814 814 foo
815 815 foobar
816 816 $ hg push -B Z http://localhost:$HGPORT/
817 817 pushing to http://localhost:$HGPORT/
818 818 searching for changes
819 819 no changes found
820 820 updating bookmark Z
821 821 [1]
822 822 $ hg book -d Z
823 823 $ hg in -B http://localhost:$HGPORT/
824 824 comparing with http://localhost:$HGPORT/
825 825 searching for changed bookmarks
826 826 @ 9b140be10808
827 827 X 9b140be10808
828 828 Z 0d2164f0ce0d
829 829 foo 000000000000
830 830 foobar 9b140be10808
831 831 $ hg pull -B Z http://localhost:$HGPORT/
832 832 pulling from http://localhost:$HGPORT/
833 833 no changes found
834 834 divergent bookmark @ stored as @1
835 835 divergent bookmark X stored as X@1
836 836 adding remote bookmark Z
837 837 adding remote bookmark foo
838 838 adding remote bookmark foobar
839 839 $ hg clone http://localhost:$HGPORT/ cloned-bookmarks
840 840 requesting all changes
841 841 adding changesets
842 842 adding manifests
843 843 adding file changes
844 844 added 5 changesets with 5 changes to 3 files (+2 heads)
845 845 2 new obsolescence markers
846 846 new changesets 4e3505fd9583:c922c0139ca0 (5 drafts)
847 847 updating to bookmark @
848 848 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
849 849 $ hg -R cloned-bookmarks bookmarks
850 850 * @ 1:9b140be10808
851 851 X 1:9b140be10808
852 852 Y 4:c922c0139ca0
853 853 Z 2:0d2164f0ce0d
854 854 foo -1:000000000000
855 855 foobar 1:9b140be10808
856 856
857 857 $ cd ..
858 858
859 859 Test showing the result of bookmark comparison
860 860
861 861 $ mkdir bmcomparison
862 862 $ cd bmcomparison
863 863
864 864 $ hg init source
865 865 $ hg -R source debugbuilddag '+2*2*3*4'
866 866 $ hg -R source log -G --template '{rev}:{node|short}'
867 867 o 4:e7bd5218ca15
868 868 |
869 869 | o 3:6100d3090acf
870 870 |/
871 871 | o 2:fa942426a6fd
872 872 |/
873 873 | o 1:66f7d451a68b
874 874 |/
875 875 o 0:1ea73414a91b
876 876
877 877 $ hg -R source bookmarks -r 0 SAME
878 878 $ hg -R source bookmarks -r 0 ADV_ON_REPO1
879 879 $ hg -R source bookmarks -r 0 ADV_ON_REPO2
880 880 $ hg -R source bookmarks -r 0 DIFF_ADV_ON_REPO1
881 881 $ hg -R source bookmarks -r 0 DIFF_ADV_ON_REPO2
882 882 $ hg -R source bookmarks -r 1 DIVERGED
883 883
884 884 $ hg clone -U source repo1
885 885
886 886 (test that incoming/outgoing exit with 1 if there are no bookmarks to
887 887 be exchanged)
888 888
889 889 $ hg -R repo1 incoming -B
890 890 comparing with $TESTTMP/bmcomparison/source
891 891 searching for changed bookmarks
892 892 no changed bookmarks found
893 893 [1]
894 894 $ hg -R repo1 outgoing -B
895 895 comparing with $TESTTMP/bmcomparison/source
896 896 searching for changed bookmarks
897 897 no changed bookmarks found
898 898 [1]
899 899
900 900 $ hg -R repo1 bookmarks -f -r 1 ADD_ON_REPO1
901 901 $ hg -R repo1 bookmarks -f -r 2 ADV_ON_REPO1
902 902 $ hg -R repo1 bookmarks -f -r 3 DIFF_ADV_ON_REPO1
903 903 $ hg -R repo1 bookmarks -f -r 3 DIFF_DIVERGED
904 904 $ hg -R repo1 -q --config extensions.mq= strip 4
905 905 $ hg -R repo1 log -G --template '{node|short} ({bookmarks})'
906 906 o 6100d3090acf (DIFF_ADV_ON_REPO1 DIFF_DIVERGED)
907 907 |
908 908 | o fa942426a6fd (ADV_ON_REPO1)
909 909 |/
910 910 | o 66f7d451a68b (ADD_ON_REPO1 DIVERGED)
911 911 |/
912 912 o 1ea73414a91b (ADV_ON_REPO2 DIFF_ADV_ON_REPO2 SAME)
913 913
914 914
915 915 $ hg clone -U source repo2
916 916 $ hg -R repo2 bookmarks -f -r 1 ADD_ON_REPO2
917 917 $ hg -R repo2 bookmarks -f -r 1 ADV_ON_REPO2
918 918 $ hg -R repo2 bookmarks -f -r 2 DIVERGED
919 919 $ hg -R repo2 bookmarks -f -r 4 DIFF_ADV_ON_REPO2
920 920 $ hg -R repo2 bookmarks -f -r 4 DIFF_DIVERGED
921 921 $ hg -R repo2 -q --config extensions.mq= strip 3
922 922 $ hg -R repo2 log -G --template '{node|short} ({bookmarks})'
923 923 o e7bd5218ca15 (DIFF_ADV_ON_REPO2 DIFF_DIVERGED)
924 924 |
925 925 | o fa942426a6fd (DIVERGED)
926 926 |/
927 927 | o 66f7d451a68b (ADD_ON_REPO2 ADV_ON_REPO2)
928 928 |/
929 929 o 1ea73414a91b (ADV_ON_REPO1 DIFF_ADV_ON_REPO1 SAME)
930 930
931 931
932 932 (test that differences in bookmarks between repositories are fully shown)
933 933
934 934 $ hg -R repo1 incoming -B repo2 -v
935 935 comparing with repo2
936 936 searching for changed bookmarks
937 937 ADD_ON_REPO2 66f7d451a68b added
938 938 ADV_ON_REPO2 66f7d451a68b advanced
939 939 DIFF_ADV_ON_REPO2 e7bd5218ca15 changed
940 940 DIFF_DIVERGED e7bd5218ca15 changed
941 941 DIVERGED fa942426a6fd diverged
942 942 $ hg -R repo1 outgoing -B repo2 -v
943 943 comparing with repo2
944 944 searching for changed bookmarks
945 945 ADD_ON_REPO1 66f7d451a68b added
946 946 ADD_ON_REPO2 deleted
947 947 ADV_ON_REPO1 fa942426a6fd advanced
948 948 DIFF_ADV_ON_REPO1 6100d3090acf advanced
949 949 DIFF_ADV_ON_REPO2 1ea73414a91b changed
950 950 DIFF_DIVERGED 6100d3090acf changed
951 951 DIVERGED 66f7d451a68b diverged
952 952
953 953 $ hg -R repo2 incoming -B repo1 -v
954 954 comparing with repo1
955 955 searching for changed bookmarks
956 956 ADD_ON_REPO1 66f7d451a68b added
957 957 ADV_ON_REPO1 fa942426a6fd advanced
958 958 DIFF_ADV_ON_REPO1 6100d3090acf changed
959 959 DIFF_DIVERGED 6100d3090acf changed
960 960 DIVERGED 66f7d451a68b diverged
961 961 $ hg -R repo2 outgoing -B repo1 -v
962 962 comparing with repo1
963 963 searching for changed bookmarks
964 964 ADD_ON_REPO1 deleted
965 965 ADD_ON_REPO2 66f7d451a68b added
966 966 ADV_ON_REPO2 66f7d451a68b advanced
967 967 DIFF_ADV_ON_REPO1 1ea73414a91b changed
968 968 DIFF_ADV_ON_REPO2 e7bd5218ca15 advanced
969 969 DIFF_DIVERGED e7bd5218ca15 changed
970 970 DIVERGED fa942426a6fd diverged
971 971
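Roughly, the states reported above mean the following (inferred from these
transcripts, not an exhaustive definition):

    added     the bookmark exists on only one side
    advanced  the new position is a descendant of the old one (fast-forward)
    changed   the position moved, but not to a descendant
    diverged  both sides moved the bookmark to different revisions
    deleted   the bookmark was removed on one side but still exists on the other
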
972 972 $ cd ..
973 973
974 974 Pushing a bookmark should only push the changes required by that
975 975 bookmark, not all outgoing changes:
976 976 $ hg clone http://localhost:$HGPORT/ addmarks
977 977 requesting all changes
978 978 adding changesets
979 979 adding manifests
980 980 adding file changes
981 981 added 5 changesets with 5 changes to 3 files (+2 heads)
982 982 2 new obsolescence markers
983 983 new changesets 4e3505fd9583:c922c0139ca0 (5 drafts)
984 984 updating to bookmark @
985 985 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
986 986 $ cd addmarks
987 987 $ echo foo > foo
988 988 $ hg add foo
989 989 $ hg commit -m 'add foo'
990 990 $ echo bar > bar
991 991 $ hg add bar
992 992 $ hg commit -m 'add bar'
993 993 $ hg co "tip^"
994 994 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
995 995 (leaving bookmark @)
996 996 $ hg book add-foo
997 997 $ hg book -r tip add-bar
998 998 Note: this push *must* push only a single changeset, as that's the point
999 999 of this test.
1000 1000 $ hg push -B add-foo --traceback
1001 1001 pushing to http://localhost:$HGPORT/
1002 1002 searching for changes
1003 1003 remote: adding changesets
1004 1004 remote: adding manifests
1005 1005 remote: adding file changes
1006 1006 remote: added 1 changesets with 1 changes to 1 files
1007 1007 exporting bookmark add-foo
1008 1008
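For reference, selecting changesets with '-B NAME' behaves like '-r NAME'
plus exporting the bookmark itself (a simplification of the behavior shown
above, not a literal test step):

    # hg push -B add-foo DEST  ~=  hg push -r add-foo DEST + export bookmark add-foo
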
1009 1009 pushing a new bookmark on a new head does not require -f if -B is specified
1010 1010
1011 1011 $ hg up -q X
1012 1012 $ hg book W
1013 1013 $ echo c5 > f2
1014 1014 $ hg ci -Am5
1015 1015 created new head
1016 1016 $ hg push -B .
1017 1017 pushing to http://localhost:$HGPORT/
1018 1018 searching for changes
1019 1019 remote: adding changesets
1020 1020 remote: adding manifests
1021 1021 remote: adding file changes
1022 1022 remote: added 1 changesets with 1 changes to 1 files (+1 heads)
1023 1023 exporting bookmark W
1024 1024 $ hg -R ../b id -r W
1025 1025 cc978a373a53 tip W
1026 1026
1027 1027 pushing an existing but divergent bookmark with -B still requires -f
1028 1028
1029 1029 $ hg clone -q . ../r
1030 1030 $ hg up -q X
1031 1031 $ echo 1 > f2
1032 1032 $ hg ci -qAml
1033 1033
1034 1034 $ cd ../r
1035 1035 $ hg up -q X
1036 1036 $ echo 2 > f2
1037 1037 $ hg ci -qAmr
1038 1038 $ hg push -B X
1039 1039 pushing to $TESTTMP/addmarks
1040 1040 searching for changes
1041 1041 remote has heads on branch 'default' that are not known locally: a2a606d9ff1b
1042 1042 abort: push creates new remote head 54694f811df9 with bookmark 'X'!
1043 1043 (pull and merge or see 'hg help push' for details about pushing new heads)
1044 1044 [255]
1045 1045 $ cd ../addmarks
1046 1046
1047 1047 Check summary output for incoming/outgoing bookmarks
1048 1048
1049 1049 $ hg bookmarks -d X
1050 1050 $ hg bookmarks -d Y
1051 1051 $ hg summary --remote | grep '^remote:'
1052 1052 remote: *, 2 incoming bookmarks, 1 outgoing bookmarks (glob)
1053 1053
1054 1054 $ cd ..
1055 1055
1056 1056 pushing an unchanged bookmark should result in no changes
1057 1057
1058 1058 $ hg init unchanged-a
1059 1059 $ hg init unchanged-b
1060 1060 $ cd unchanged-a
1061 1061 $ echo initial > foo
1062 1062 $ hg commit -A -m initial
1063 1063 adding foo
1064 1064 $ hg bookmark @
1065 1065 $ hg push -B @ ../unchanged-b
1066 1066 pushing to ../unchanged-b
1067 1067 searching for changes
1068 1068 adding changesets
1069 1069 adding manifests
1070 1070 adding file changes
1071 1071 added 1 changesets with 1 changes to 1 files
1072 1072 exporting bookmark @
1073 1073
1074 1074 $ hg push -B @ ../unchanged-b
1075 1075 pushing to ../unchanged-b
1076 1076 searching for changes
1077 1077 no changes found
1078 1078 [1]
1079 1079
1080 1080 Pushing a really long bookmark should work fine (issue5165)
1081 1081 ===========================================================
1082 1082
1083 1083 #if b2-binary
1084 1084 >>> with open('longname', 'w') as f:
1085 1085 ... f.write('wat' * 100) and None
1086 1086 $ hg book `cat longname`
1087 1087 $ hg push -B `cat longname` ../unchanged-b
1088 1088 pushing to ../unchanged-b
1089 1089 searching for changes
1090 1090 no changes found
1091 1091 exporting bookmark (wat){100} (re)
1092 1092 [1]
1093 1093 $ hg -R ../unchanged-b book --delete `cat longname`
1094 1094
1095 1095 Test again, forcing legacy bundle1 exchange, to make sure that doesn't regress.
1096 1096
1097 1097 $ hg push -B `cat longname` ../unchanged-b --config devel.legacy.exchange=bundle1
1098 1098 pushing to ../unchanged-b
1099 1099 searching for changes
1100 1100 no changes found
1101 1101 exporting bookmark (wat){100} (re)
1102 1102 [1]
1103 1103 $ hg -R ../unchanged-b book --delete `cat longname`
1104 1104 $ hg book --delete `cat longname`
1105 1105 $ hg co @
1106 1106 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1107 1107 (activating bookmark @)
1108 1108 #endif
1109 1109
1110 1110 Check hook preventing push (issue4455)
1111 1111 ======================================
1112 1112
1113 1113 $ hg bookmarks
1114 1114 * @ 0:55482a6fb4b1
1115 1115 $ hg log -G
1116 1116 @ 0:55482a6fb4b1 initial
1117 1117
1118 1118 $ hg init ../issue4455-dest
1119 1119 $ hg push ../issue4455-dest # changesets only
1120 1120 pushing to ../issue4455-dest
1121 1121 searching for changes
1122 1122 adding changesets
1123 1123 adding manifests
1124 1124 adding file changes
1125 1125 added 1 changesets with 1 changes to 1 files
1126 1126 $ cat >> .hg/hgrc << EOF
1127 1127 > [paths]
1128 1128 > local=../issue4455-dest/
1129 1129 > ssh=ssh://user@dummy/issue4455-dest
1130 1130 > http=http://localhost:$HGPORT/
1131 1131 > [ui]
1132 1132 > ssh="$PYTHON" "$TESTDIR/dummyssh"
1133 1133 > EOF
1134 1134 $ cat >> ../issue4455-dest/.hg/hgrc << EOF
1135 1135 > [hooks]
1136 1136 > prepushkey=false
1137 1137 > [web]
1138 1138 > push_ssl = false
1139 1139 > allow_push = *
1140 1140 > EOF
1141 1141 $ killdaemons.py
1142 1142 $ hg serve -R ../issue4455-dest -p $HGPORT -d --pid-file=../issue4455.pid -E ../issue4455-error.log
1143 1143 $ cat ../issue4455.pid >> $DAEMON_PIDS
1144 1144
1145 1145 Local push
1146 1146 ----------
1147 1147
1148 1148 #if b2-pushkey
1149 1149
1150 1150 $ hg push -B @ local
1151 1151 pushing to $TESTTMP/issue4455-dest
1152 1152 searching for changes
1153 1153 no changes found
1154 1154 pushkey-abort: prepushkey hook exited with status 1
1155 1155 abort: exporting bookmark @ failed!
1156 1156 [255]
1157 1157
1158 1158 #endif
1159 1159 #if b2-binary
1160 1160
1161 1161 $ hg push -B @ local
1162 1162 pushing to $TESTTMP/issue4455-dest
1163 1163 searching for changes
1164 1164 no changes found
1165 1165 abort: prepushkey hook exited with status 1
1166 1166 [255]
1167 1167
1168 1168 #endif
1169 1169
1170 1170 $ hg -R ../issue4455-dest/ bookmarks
1171 1171 no bookmarks set
1172 1172
1173 1173 Using ssh
1174 1174 ---------
1175 1175
1176 1176 #if b2-pushkey
1177 1177
1178 1178 $ hg push -B @ ssh # bundle2+
1179 1179 pushing to ssh://user@dummy/issue4455-dest
1180 1180 searching for changes
1181 1181 no changes found
1182 1182 remote: pushkey-abort: prepushkey hook exited with status 1
1183 1183 abort: exporting bookmark @ failed!
1184 1184 [255]
1185 1185
1186 1186 $ hg -R ../issue4455-dest/ bookmarks
1187 1187 no bookmarks set
1188 1188
1189 1189 $ hg push -B @ ssh --config devel.legacy.exchange=bundle1
1190 1190 pushing to ssh://user@dummy/issue4455-dest
1191 1191 searching for changes
1192 1192 no changes found
1193 1193 remote: pushkey-abort: prepushkey hook exited with status 1
1194 1194 exporting bookmark @ failed!
1195 1195 [1]
1196 1196
1197 1197 #endif
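
To summarize the exit-code difference exercised in this section (taken from
the transcripts):

    bundle2 pushkey:  a remote hook failure aborts the whole push      -> exit 255
    legacy bundle1:   the bookmark export fails but the push continues -> exit 1
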
1198 1198 #if b2-binary
1199 1199
1200 1200 $ hg push -B @ ssh # bundle2+
1201 1201 pushing to ssh://user@dummy/issue4455-dest
1202 1202 searching for changes
1203 1203 no changes found
1204 1204 remote: prepushkey hook exited with status 1
1205 1205 abort: push failed on remote
1206 1206 [255]
1207 1207
1208 1208 #endif
1209 1209
1210 1210 $ hg -R ../issue4455-dest/ bookmarks
1211 1211 no bookmarks set
1212 1212
1213 1213 Using http
1214 1214 ----------
1215 1215
1216 1216 #if b2-pushkey
1217 1217 $ hg push -B @ http # bundle2+
1218 1218 pushing to http://localhost:$HGPORT/
1219 1219 searching for changes
1220 1220 no changes found
1221 1221 remote: pushkey-abort: prepushkey hook exited with status 1
1222 1222 abort: exporting bookmark @ failed!
1223 1223 [255]
1224 1224
1225 1225 $ hg -R ../issue4455-dest/ bookmarks
1226 1226 no bookmarks set
1227 1227
1228 1228 $ hg push -B @ http --config devel.legacy.exchange=bundle1
1229 1229 pushing to http://localhost:$HGPORT/
1230 1230 searching for changes
1231 1231 no changes found
1232 1232 remote: pushkey-abort: prepushkey hook exited with status 1
1233 1233 exporting bookmark @ failed!
1234 1234 [1]
1235 1235
1236 1236 #endif
1237 1237
1238 1238 #if b2-binary
1239 1239
1240 1240 $ hg push -B @ ssh # bundle2+
1241 1241 pushing to ssh://user@dummy/issue4455-dest
1242 1242 searching for changes
1243 1243 no changes found
1244 1244 remote: prepushkey hook exited with status 1
1245 1245 abort: push failed on remote
1246 1246 [255]
1247 1247
1248 1248 #endif
1249 1249
1250 1250 $ hg -R ../issue4455-dest/ bookmarks
1251 1251 no bookmarks set
1252 1252
1253 1253 $ cd ..
1254 1254
1255 1255 Test that the bookmarks-pushkey compatibility layer works as expected (issue5777)
1256 1256
1257 1257 $ cat << EOF >> $HGRCPATH
1258 1258 > [ui]
1259 1259 > ssh="$PYTHON" "$TESTDIR/dummyssh"
1260 1260 > [server]
1261 1261 > bookmarks-pushkey-compat = yes
1262 1262 > EOF
1263 1263
1264 1264 $ hg init server
1265 1265 $ echo foo > server/a
1266 1266 $ hg -R server book foo
1267 1267 $ hg -R server commit -Am a
1268 1268 adding a
1269 1269 $ hg clone ssh://user@dummy/server client
1270 1270 requesting all changes
1271 1271 adding changesets
1272 1272 adding manifests
1273 1273 adding file changes
1274 1274 added 1 changesets with 1 changes to 1 files
1275 1275 new changesets 79513d0d7716 (1 drafts)
1276 1276 updating to branch default
1277 1277 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1278 1278
1279 1279 Forbid bookmark move on the server
1280 1280
1281 1281 $ cat << EOF >> $TESTTMP/no-bm-move.sh
1282 1282 > #!/bin/sh
1283 1283 > echo \$HG_NAMESPACE | grep -v bookmarks
1284 1284 > EOF
1285 1285 $ cat << EOF >> server/.hg/hgrc
1286 1286 > [hooks]
1287 1287 > prepushkey.no-bm-move= sh $TESTTMP/no-bm-move.sh
1288 1288 > EOF
1289 1289
1290 1290 pushing a changeset is okay
1291 1291
1292 1292 $ echo bar >> client/a
1293 1293 $ hg -R client commit -m b
1294 1294 $ hg -R client push
1295 1295 pushing to ssh://user@dummy/server
1296 1296 searching for changes
1297 1297 remote: adding changesets
1298 1298 remote: adding manifests
1299 1299 remote: adding file changes
1300 1300 remote: added 1 changesets with 1 changes to 1 files
1301 1301
1302 1302 an attempt to move the bookmark is rejected
1303 1303
1304 1304 $ hg -R client book foo -r .
1305 1305 moving bookmark 'foo' forward from 79513d0d7716
1306 1306
1307 1307 #if b2-pushkey
1308 1308 $ hg -R client push
1309 1309 pushing to ssh://user@dummy/server
1310 1310 searching for changes
1311 1311 no changes found
1312 1312 remote: pushkey-abort: prepushkey.no-bm-move hook exited with status 1
1313 1313 abort: updating bookmark foo failed!
1314 1314 [255]
1315 1315 #endif
1316 1316 #if b2-binary
1317 1317 $ hg -R client push
1318 1318 pushing to ssh://user@dummy/server
1319 1319 searching for changes
1320 1320 no changes found
1321 1321 remote: prepushkey.no-bm-move hook exited with status 1
1322 1322 abort: push failed on remote
1323 1323 [255]
1324 1324 #endif
1325 1325
1326 1326 test for pushing bookmarks pointing to secret changesets (issue6159)
1327 1327
1328 1328 Set up a "remote" repo
1329 1329 $ hg init issue6159remote
1330 1330 $ cd issue6159remote
1331 1331 $ echo a > a
1332 1332 $ hg add a
1333 1333 $ hg commit -m_
1334 1334 $ hg bookmark foo
1335 1335 $ cd ..
1336 1336
1337 1337 Clone a local repo
1338 1338 $ hg clone -q issue6159remote issue6159local
1339 1339 $ cd issue6159local
1340 1340 $ hg up -qr foo
1341 1341 $ echo b > b
1342 1342
1343 1343 Move the bookmark "foo" to point at a secret changeset
1344 1344 $ hg commit -qAm_ --config phases.new-commit=secret
1345 1345
1346 1346 Pushing the bookmark "foo" now fails as it contains a secret changeset
1347 #if b2-pushkey
1348 $ hg push -r foo
1349 pushing to $TESTTMP/issue6159remote
1350 searching for changes
1351 no changes found (ignored 1 secret changesets)
1352 abort: updating bookmark foo failed!
1353 [255]
1354 #endif
1355
1356 #if b2-binary
1357 1347 $ hg push -r foo
1358 1348 pushing to $TESTTMP/issue6159remote
1359 1349 searching for changes
1360 1350 no changes found (ignored 1 secret changesets)
1361 updating bookmark foo
1362 [1]
1363 #endif
1364
1365 Now the "remote" repo contains a bookmark pointing to a nonexistent revision
1366 $ cd ../issue6159remote
1367 #if b2-pushkey
1368 $ hg bookmark
1369 * foo 0:1599bc8b897a
1370 $ hg log -r 1599bc8b897a
1371 0:1599bc8b897a _ (no-eol)
1372 #endif
1373
1374 #if b2-binary
1375 $ hg bookmark
1376 no bookmarks set
1377 $ cat .hg/bookmarks
1378 cf489fd8a374cab73c2dc19e899bde6fe3a43f8f foo
1379 $ hg log -r cf489fd8a374
1380 abort: unknown revision 'cf489fd8a374'!
1351 abort: cannot push bookmark foo as it points to a secret changeset
1381 1352 [255]
1382 #endif
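
The fix exercised by this updated test amounts to checking the phase of each
changeset a pushed bookmark points to; a minimal sketch (illustrative,
assuming the hypothetical helper name below, not the exact patch):

    # hypothetical sketch of the issue6159 check
    from mercurial import error, phases

    def checksecretbookmarks(repo, bookmarks):
        for name, node in bookmarks:
            if repo[node].phase() >= phases.secret:
                raise error.Abort(
                    b'cannot push bookmark %s as it points to a secret '
                    b'changeset' % name)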