remotenames: rename related file and storage dir to logexchange...
Pulkit Goyal
r35348:a29fe459 default
@@ -1,2214 +1,2214 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 nullid,
19 19 )
20 20 from . import (
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 lock as lockmod,
27 logexchange,
27 28 obsolete,
28 29 phases,
29 30 pushkey,
30 31 pycompat,
31 remotenames,
32 32 scmutil,
33 33 sslutil,
34 34 streamclone,
35 35 url as urlmod,
36 36 util,
37 37 )
38 38
39 39 urlerr = util.urlerr
40 40 urlreq = util.urlreq
41 41
42 42 # Maps bundle version human names to changegroup versions.
43 43 _bundlespeccgversions = {'v1': '01',
44 44 'v2': '02',
45 45 'packed1': 's1',
46 46 'bundle2': '02', #legacy
47 47 }
48 48
49 49 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
50 50 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
51 51
52 52 def parsebundlespec(repo, spec, strict=True, externalnames=False):
53 53 """Parse a bundle string specification into parts.
54 54
55 55 Bundle specifications denote a well-defined bundle/exchange format.
56 56 The content of a given specification should not change over time in
57 57 order to ensure that bundles produced by a newer version of Mercurial are
58 58 readable from an older version.
59 59
60 60 The string currently has the form:
61 61
62 62 <compression>-<type>[;<parameter0>[;<parameter1>]]
63 63
64 64 Where <compression> is one of the supported compression formats
65 65 and <type> is (currently) a version string. A ";" can follow the type and
66 66 all text afterwards is interpreted as URI encoded, ";" delimited key=value
67 67 pairs.
68 68
69 69 If ``strict`` is True (the default) <compression> is required. Otherwise,
70 70 it is optional.
71 71
72 72 If ``externalnames`` is False (the default), the human-centric names will
73 73 be converted to their internal representation.
74 74
75 75 Returns a 3-tuple of (compression, version, parameters). Compression will
76 76 be ``None`` if not in strict mode and a compression isn't defined.
77 77
78 78 An ``InvalidBundleSpecification`` is raised when the specification is
79 79 not syntactically well formed.
80 80
81 81 An ``UnsupportedBundleSpecification`` is raised when the compression or
82 82 bundle type/version is not recognized.
83 83
84 84 Note: this function will likely eventually return a more complex data
85 85 structure, including bundle2 part information.
86 86 """
87 87 def parseparams(s):
88 88 if ';' not in s:
89 89 return s, {}
90 90
91 91 params = {}
92 92 version, paramstr = s.split(';', 1)
93 93
94 94 for p in paramstr.split(';'):
95 95 if '=' not in p:
96 96 raise error.InvalidBundleSpecification(
97 97 _('invalid bundle specification: '
98 98 'missing "=" in parameter: %s') % p)
99 99
100 100 key, value = p.split('=', 1)
101 101 key = urlreq.unquote(key)
102 102 value = urlreq.unquote(value)
103 103 params[key] = value
104 104
105 105 return version, params
106 106
107 107
108 108 if strict and '-' not in spec:
109 109 raise error.InvalidBundleSpecification(
110 110 _('invalid bundle specification; '
111 111 'must be prefixed with compression: %s') % spec)
112 112
113 113 if '-' in spec:
114 114 compression, version = spec.split('-', 1)
115 115
116 116 if compression not in util.compengines.supportedbundlenames:
117 117 raise error.UnsupportedBundleSpecification(
118 118 _('%s compression is not supported') % compression)
119 119
120 120 version, params = parseparams(version)
121 121
122 122 if version not in _bundlespeccgversions:
123 123 raise error.UnsupportedBundleSpecification(
124 124 _('%s is not a recognized bundle version') % version)
125 125 else:
126 126 # Value could be just the compression or just the version, in which
127 127 # case some defaults are assumed (but only when not in strict mode).
128 128 assert not strict
129 129
130 130 spec, params = parseparams(spec)
131 131
132 132 if spec in util.compengines.supportedbundlenames:
133 133 compression = spec
134 134 version = 'v1'
135 135 # Generaldelta repos require v2.
136 136 if 'generaldelta' in repo.requirements:
137 137 version = 'v2'
138 138 # Modern compression engines require v2.
139 139 if compression not in _bundlespecv1compengines:
140 140 version = 'v2'
141 141 elif spec in _bundlespeccgversions:
142 142 if spec == 'packed1':
143 143 compression = 'none'
144 144 else:
145 145 compression = 'bzip2'
146 146 version = spec
147 147 else:
148 148 raise error.UnsupportedBundleSpecification(
149 149 _('%s is not a recognized bundle specification') % spec)
150 150
151 151 # Bundle version 1 only supports a known set of compression engines.
152 152 if version == 'v1' and compression not in _bundlespecv1compengines:
153 153 raise error.UnsupportedBundleSpecification(
154 154 _('compression engine %s is not supported on v1 bundles') %
155 155 compression)
156 156
157 157 # The specification for packed1 can optionally declare the data formats
158 158 # required to apply it. If we see this metadata, compare against what the
159 159 # repo supports and error if the bundle isn't compatible.
160 160 if version == 'packed1' and 'requirements' in params:
161 161 requirements = set(params['requirements'].split(','))
162 162 missingreqs = requirements - repo.supportedformats
163 163 if missingreqs:
164 164 raise error.UnsupportedBundleSpecification(
165 165 _('missing support for repository features: %s') %
166 166 ', '.join(sorted(missingreqs)))
167 167
168 168 if not externalnames:
169 169 engine = util.compengines.forbundlename(compression)
170 170 compression = engine.bundletype()[1]
171 171 version = _bundlespeccgversions[version]
172 172 return compression, version, params
173 173
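# A minimal sketch of how specifications travel through parsebundlespec(),
# assuming ``repo`` is a repository without the 'generaldelta' requirement
# and with 'revlogv1' in repo.supportedformats (the usual case):
#
#   >>> parsebundlespec(repo, 'gzip-v2')
#   ('GZ', '02', {})
#   >>> parsebundlespec(repo, 'bzip2', strict=False)
#   ('BZ', '01', {})
#   >>> parsebundlespec(repo, 'none-packed1;requirements=revlogv1',
#   ...                 externalnames=True)
#   ('none', 'packed1', {'requirements': 'revlogv1'})
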
174 174 def readbundle(ui, fh, fname, vfs=None):
175 175 header = changegroup.readexactly(fh, 4)
176 176
177 177 alg = None
178 178 if not fname:
179 179 fname = "stream"
180 180 if not header.startswith('HG') and header.startswith('\0'):
181 181 fh = changegroup.headerlessfixup(fh, header)
182 182 header = "HG10"
183 183 alg = 'UN'
184 184 elif vfs:
185 185 fname = vfs.join(fname)
186 186
187 187 magic, version = header[0:2], header[2:4]
188 188
189 189 if magic != 'HG':
190 190 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
191 191 if version == '10':
192 192 if alg is None:
193 193 alg = changegroup.readexactly(fh, 2)
194 194 return changegroup.cg1unpacker(fh, alg)
195 195 elif version.startswith('2'):
196 196 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
197 197 elif version == 'S1':
198 198 return streamclone.streamcloneapplier(fh)
199 199 else:
200 200 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
201 201
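# A usage sketch, assuming ``bundlepath`` names a file written by
# `hg bundle`; the four-byte header read above ('HG10' plus a compression
# code, 'HG20', or 'HGS1') selects which unbundler is returned:
#
#   with open(bundlepath, 'rb') as fh:
#       gen = readbundle(ui, fh, bundlepath)
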
202 202 def getbundlespec(ui, fh):
203 203 """Infer the bundlespec from a bundle file handle.
204 204
205 205 The input file handle is seeked and the original seek position is not
206 206 restored.
207 207 """
208 208 def speccompression(alg):
209 209 try:
210 210 return util.compengines.forbundletype(alg).bundletype()[0]
211 211 except KeyError:
212 212 return None
213 213
214 214 b = readbundle(ui, fh, None)
215 215 if isinstance(b, changegroup.cg1unpacker):
216 216 alg = b._type
217 217 if alg == '_truncatedBZ':
218 218 alg = 'BZ'
219 219 comp = speccompression(alg)
220 220 if not comp:
221 221 raise error.Abort(_('unknown compression algorithm: %s') % alg)
222 222 return '%s-v1' % comp
223 223 elif isinstance(b, bundle2.unbundle20):
224 224 if 'Compression' in b.params:
225 225 comp = speccompression(b.params['Compression'])
226 226 if not comp:
227 227 raise error.Abort(_('unknown compression algorithm: %s') % comp)
228 228 else:
229 229 comp = 'none'
230 230
231 231 version = None
232 232 for part in b.iterparts():
233 233 if part.type == 'changegroup':
234 234 version = part.params['version']
235 235 if version in ('01', '02'):
236 236 version = 'v2'
237 237 else:
238 238 raise error.Abort(_('changegroup version %s does not have '
239 239 'a known bundlespec') % version,
240 240 hint=_('try upgrading your Mercurial '
241 241 'client'))
242 242
243 243 if not version:
244 244 raise error.Abort(_('could not identify changegroup version in '
245 245 'bundle'))
246 246
247 247 return '%s-%s' % (comp, version)
248 248 elif isinstance(b, streamclone.streamcloneapplier):
249 249 requirements = streamclone.readbundle1header(fh)[2]
250 250 params = 'requirements=%s' % ','.join(sorted(requirements))
251 251 return 'none-packed1;%s' % urlreq.quote(params)
252 252 else:
253 253 raise error.Abort(_('unknown bundle type: %s') % b)
254 254
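# A usage sketch, assuming ``bundlepath`` names an existing bundle file;
# the inferred string round-trips through parsebundlespec() above:
#
#   with open(bundlepath, 'rb') as fh:
#       spec = getbundlespec(ui, fh)  # e.g. 'bzip2-v1' or 'none-v2'
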
255 255 def _computeoutgoing(repo, heads, common):
256 256 """Computes which revs are outgoing given a set of common
257 257 and a set of heads.
258 258
259 259 This is a separate function so extensions can have access to
260 260 the logic.
261 261
262 262 Returns a discovery.outgoing object.
263 263 """
264 264 cl = repo.changelog
265 265 if common:
266 266 hasnode = cl.hasnode
267 267 common = [n for n in common if hasnode(n)]
268 268 else:
269 269 common = [nullid]
270 270 if not heads:
271 271 heads = cl.heads()
272 272 return discovery.outgoing(repo, common, heads)
273 273
274 274 def _forcebundle1(op):
275 275 """return true if a pull/push must use bundle1
276 276
277 277 This function is used to allow testing of the older bundle version"""
278 278 ui = op.repo.ui
279 279 forcebundle1 = False
280 280     # The goal of this config is to allow developers to choose the bundle
281 281     # version used during exchange. This is especially handy during tests.
282 282     # Value is a list of bundle versions to pick from; the highest version
283 283     # should be used.
284 284 #
285 285 # developer config: devel.legacy.exchange
286 286 exchange = ui.configlist('devel', 'legacy.exchange')
287 287 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
288 288 return forcebundle1 or not op.remote.capable('bundle2')
289 289
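# A configuration sketch for the developer knob checked above, e.g. in a
# test hgrc; listing only 'bundle1' forces the legacy format even against
# a bundle2-capable peer:
#
#   [devel]
#   legacy.exchange = bundle1
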
290 290 class pushoperation(object):
291 291     """An object that represents a single push operation
292 292
293 293     Its purpose is to carry push-related state and very common operations.
294 294
295 295 A new pushoperation should be created at the beginning of each push and
296 296 discarded afterward.
297 297 """
298 298
299 299 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
300 300 bookmarks=(), pushvars=None):
301 301 # repo we push from
302 302 self.repo = repo
303 303 self.ui = repo.ui
304 304 # repo we push to
305 305 self.remote = remote
306 306 # force option provided
307 307 self.force = force
308 308 # revs to be pushed (None is "all")
309 309 self.revs = revs
310 310         # bookmarks explicitly pushed
311 311 self.bookmarks = bookmarks
312 312 # allow push of new branch
313 313 self.newbranch = newbranch
314 314         # steps already performed
315 315         # (used to check what steps have already been performed through bundle2)
316 316 self.stepsdone = set()
317 317 # Integer version of the changegroup push result
318 318 # - None means nothing to push
319 319 # - 0 means HTTP error
320 320 # - 1 means we pushed and remote head count is unchanged *or*
321 321 # we have outgoing changesets but refused to push
322 322 # - other values as described by addchangegroup()
323 323 self.cgresult = None
324 324 # Boolean value for the bookmark push
325 325 self.bkresult = None
326 326         # discovery.outgoing object (contains common and outgoing data)
327 327 self.outgoing = None
328 328 # all remote topological heads before the push
329 329 self.remoteheads = None
330 330 # Details of the remote branch pre and post push
331 331 #
332 332 # mapping: {'branch': ([remoteheads],
333 333 # [newheads],
334 334 # [unsyncedheads],
335 335 # [discardedheads])}
336 336 # - branch: the branch name
337 337 # - remoteheads: the list of remote heads known locally
338 338 # None if the branch is new
339 339 # - newheads: the new remote heads (known locally) with outgoing pushed
340 340 # - unsyncedheads: the list of remote heads unknown locally.
341 341 # - discardedheads: the list of remote heads made obsolete by the push
342 342 self.pushbranchmap = None
343 343 # testable as a boolean indicating if any nodes are missing locally.
344 344 self.incoming = None
345 345 # summary of the remote phase situation
346 346 self.remotephases = None
347 347         # phase changes that must be pushed alongside the changesets
348 348         self.outdatedphases = None
349 349         # phase changes that must be pushed if changeset push fails
350 350 self.fallbackoutdatedphases = None
351 351 # outgoing obsmarkers
352 352 self.outobsmarkers = set()
353 353 # outgoing bookmarks
354 354 self.outbookmarks = []
355 355 # transaction manager
356 356 self.trmanager = None
357 357 # map { pushkey partid -> callback handling failure}
358 358 # used to handle exception from mandatory pushkey part failure
359 359 self.pkfailcb = {}
360 360 # an iterable of pushvars or None
361 361 self.pushvars = pushvars
362 362
363 363 @util.propertycache
364 364 def futureheads(self):
365 365 """future remote heads if the changeset push succeeds"""
366 366 return self.outgoing.missingheads
367 367
368 368 @util.propertycache
369 369 def fallbackheads(self):
370 370 """future remote heads if the changeset push fails"""
371 371 if self.revs is None:
372 372             # no target to push, all common heads are relevant
373 373 return self.outgoing.commonheads
374 374 unfi = self.repo.unfiltered()
375 375 # I want cheads = heads(::missingheads and ::commonheads)
376 376 # (missingheads is revs with secret changeset filtered out)
377 377 #
378 378 # This can be expressed as:
379 379 # cheads = ( (missingheads and ::commonheads)
380 380         #                + (commonheads and ::missingheads)
381 381 # )
382 382 #
383 383 # while trying to push we already computed the following:
384 384 # common = (::commonheads)
385 385 # missing = ((commonheads::missingheads) - commonheads)
386 386 #
387 387 # We can pick:
388 388 # * missingheads part of common (::commonheads)
389 389 common = self.outgoing.common
390 390 nm = self.repo.changelog.nodemap
391 391 cheads = [node for node in self.revs if nm[node] in common]
392 392 # and
393 393 # * commonheads parents on missing
394 394 revset = unfi.set('%ln and parents(roots(%ln))',
395 395 self.outgoing.commonheads,
396 396 self.outgoing.missing)
397 397 cheads.extend(c.node() for c in revset)
398 398 return cheads
399 399
400 400 @property
401 401 def commonheads(self):
402 402 """set of all common heads after changeset bundle push"""
403 403 if self.cgresult:
404 404 return self.futureheads
405 405 else:
406 406 return self.fallbackheads
407 407
408 408 # mapping of messages used when pushing bookmarks
409 409 bookmsgmap = {'update': (_("updating bookmark %s\n"),
410 410 _('updating bookmark %s failed!\n')),
411 411 'export': (_("exporting bookmark %s\n"),
412 412 _('exporting bookmark %s failed!\n')),
413 413 'delete': (_("deleting remote bookmark %s\n"),
414 414 _('deleting remote bookmark %s failed!\n')),
415 415 }
416 416
417 417
418 418 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
419 419 opargs=None):
420 420 '''Push outgoing changesets (limited by revs) from a local
421 421 repository to remote. Return an integer:
422 422 - None means nothing to push
423 423 - 0 means HTTP error
424 424 - 1 means we pushed and remote head count is unchanged *or*
425 425 we have outgoing changesets but refused to push
426 426 - other values as described by addchangegroup()
427 427 '''
428 428 if opargs is None:
429 429 opargs = {}
430 430 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
431 431 **pycompat.strkwargs(opargs))
432 432 if pushop.remote.local():
433 433 missing = (set(pushop.repo.requirements)
434 434 - pushop.remote.local().supported)
435 435 if missing:
436 436 msg = _("required features are not"
437 437 " supported in the destination:"
438 438 " %s") % (', '.join(sorted(missing)))
439 439 raise error.Abort(msg)
440 440
441 441 if not pushop.remote.canpush():
442 442 raise error.Abort(_("destination does not support push"))
443 443
444 444 if not pushop.remote.capable('unbundle'):
445 445 raise error.Abort(_('cannot push: destination does not support the '
446 446 'unbundle wire protocol command'))
447 447
448 448 # get lock as we might write phase data
449 449 wlock = lock = None
450 450 try:
451 451 # bundle2 push may receive a reply bundle touching bookmarks or other
452 452 # things requiring the wlock. Take it now to ensure proper ordering.
453 453 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
454 454 if (not _forcebundle1(pushop)) and maypushback:
455 455 wlock = pushop.repo.wlock()
456 456 lock = pushop.repo.lock()
457 457 pushop.trmanager = transactionmanager(pushop.repo,
458 458 'push-response',
459 459 pushop.remote.url())
460 460 except IOError as err:
461 461 if err.errno != errno.EACCES:
462 462 raise
463 463 # source repo cannot be locked.
464 464 # We do not abort the push, but just disable the local phase
465 465 # synchronisation.
466 466 msg = 'cannot lock source repository: %s\n' % err
467 467 pushop.ui.debug(msg)
468 468
469 469 with wlock or util.nullcontextmanager(), \
470 470 lock or util.nullcontextmanager(), \
471 471 pushop.trmanager or util.nullcontextmanager():
472 472 pushop.repo.checkpush(pushop)
473 473 _pushdiscovery(pushop)
474 474 if not _forcebundle1(pushop):
475 475 _pushbundle2(pushop)
476 476 _pushchangeset(pushop)
477 477 _pushsyncphase(pushop)
478 478 _pushobsolete(pushop)
479 479 _pushbookmark(pushop)
480 480
481 481 return pushop
482 482
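# A minimal usage sketch, assuming ``other`` is a peer obtained from
# hg.peer(); the returned operation exposes the results described above:
#
#   pushop = push(repo, other, revs=[repo['tip'].node()])
#   if pushop.cgresult == 0:
#       pass  # HTTP error while sending the changegroup
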
483 483 # list of steps to perform discovery before push
484 484 pushdiscoveryorder = []
485 485
486 486 # Mapping between step name and function
487 487 #
488 488 # This exists to help extensions wrap steps if necessary
489 489 pushdiscoverymapping = {}
490 490
491 491 def pushdiscovery(stepname):
492 492 """decorator for function performing discovery before push
493 493
494 494 The function is added to the step -> function mapping and appended to the
495 495     list of steps. Beware that decorated functions will be added in order (this
496 496     may matter).
497 497 
498 498     You can only use this decorator for a new step; if you want to wrap a step
499 499     from an extension, change the pushdiscoverymapping dictionary directly."""
500 500 def dec(func):
501 501 assert stepname not in pushdiscoverymapping
502 502 pushdiscoverymapping[stepname] = func
503 503 pushdiscoveryorder.append(stepname)
504 504 return func
505 505 return dec
506 506
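# A sketch of registering a new step from a hypothetical extension; the
# decorator records the function in pushdiscoverymapping and appends
# 'mystep' to pushdiscoveryorder:
#
#   @pushdiscovery('mystep')
#   def _pushdiscoverymystep(pushop):
#       pushop.mystepdata = None  # hypothetical state gathered before push
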
507 507 def _pushdiscovery(pushop):
508 508 """Run all discovery steps"""
509 509 for stepname in pushdiscoveryorder:
510 510 step = pushdiscoverymapping[stepname]
511 511 step(pushop)
512 512
513 513 @pushdiscovery('changeset')
514 514 def _pushdiscoverychangeset(pushop):
515 515     """discover the changesets that need to be pushed"""
516 516 fci = discovery.findcommonincoming
517 517 if pushop.revs:
518 518 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
519 519 ancestorsof=pushop.revs)
520 520 else:
521 521 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
522 522 common, inc, remoteheads = commoninc
523 523 fco = discovery.findcommonoutgoing
524 524 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
525 525 commoninc=commoninc, force=pushop.force)
526 526 pushop.outgoing = outgoing
527 527 pushop.remoteheads = remoteheads
528 528 pushop.incoming = inc
529 529
530 530 @pushdiscovery('phase')
531 531 def _pushdiscoveryphase(pushop):
532 532     """discover the phases that need to be pushed
533 533
534 534 (computed for both success and failure case for changesets push)"""
535 535 outgoing = pushop.outgoing
536 536 unfi = pushop.repo.unfiltered()
537 537 remotephases = pushop.remote.listkeys('phases')
538 538 if (pushop.ui.configbool('ui', '_usedassubrepo')
539 539 and remotephases # server supports phases
540 540 and not pushop.outgoing.missing # no changesets to be pushed
541 541 and remotephases.get('publishing', False)):
542 542 # When:
543 543 # - this is a subrepo push
544 544         # - and the remote supports phases
545 545         # - and no changesets are to be pushed
546 546         # - and the remote is publishing
547 547         # We may be in the issue 3871 case!
548 548         # We drop the phase synchronisation usually done as a courtesy,
549 549         # since it could publish changesets that are still draft
550 550         # locally.
551 551 pushop.outdatedphases = []
552 552 pushop.fallbackoutdatedphases = []
553 553 return
554 554
555 555 pushop.remotephases = phases.remotephasessummary(pushop.repo,
556 556 pushop.fallbackheads,
557 557 remotephases)
558 558 droots = pushop.remotephases.draftroots
559 559
560 560 extracond = ''
561 561 if not pushop.remotephases.publishing:
562 562 extracond = ' and public()'
563 563 revset = 'heads((%%ln::%%ln) %s)' % extracond
564 564     # Get the list of all revs draft on the remote but public here.
565 565     # XXX Beware that the revset breaks if droots is not strictly
566 566     # XXX roots; we may want to ensure it is, but that is costly
567 567 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
568 568 if not outgoing.missing:
569 569 future = fallback
570 570 else:
571 571             # add the changesets we are going to push as draft
572 572             #
573 573             # should not be necessary for a publishing server, but because of an
574 574             # issue fixed in xxxxx we have to do it anyway.
575 575 fdroots = list(unfi.set('roots(%ln + %ln::)',
576 576 outgoing.missing, droots))
577 577 fdroots = [f.node() for f in fdroots]
578 578 future = list(unfi.set(revset, fdroots, pushop.futureheads))
579 579 pushop.outdatedphases = future
580 580 pushop.fallbackoutdatedphases = fallback
581 581
582 582 @pushdiscovery('obsmarker')
583 583 def _pushdiscoveryobsmarkers(pushop):
584 584 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
585 585 and pushop.repo.obsstore
586 586 and 'obsolete' in pushop.remote.listkeys('namespaces')):
587 587 repo = pushop.repo
588 588         # a very naive computation that can be quite expensive on big repos.
589 589         # However, evolution is currently slow on them anyway.
590 590 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
591 591 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
592 592
593 593 @pushdiscovery('bookmarks')
594 594 def _pushdiscoverybookmarks(pushop):
595 595 ui = pushop.ui
596 596 repo = pushop.repo.unfiltered()
597 597 remote = pushop.remote
598 598 ui.debug("checking for updated bookmarks\n")
599 599 ancestors = ()
600 600 if pushop.revs:
601 601 revnums = map(repo.changelog.rev, pushop.revs)
602 602 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
603 603 remotebookmark = remote.listkeys('bookmarks')
604 604
605 605 explicit = set([repo._bookmarks.expandname(bookmark)
606 606 for bookmark in pushop.bookmarks])
607 607
608 608 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
609 609 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
610 610
611 611 def safehex(x):
612 612 if x is None:
613 613 return x
614 614 return hex(x)
615 615
616 616 def hexifycompbookmarks(bookmarks):
617 617 for b, scid, dcid in bookmarks:
618 618 yield b, safehex(scid), safehex(dcid)
619 619
620 620 comp = [hexifycompbookmarks(marks) for marks in comp]
621 621 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
622 622
623 623 for b, scid, dcid in advsrc:
624 624 if b in explicit:
625 625 explicit.remove(b)
626 626 if not ancestors or repo[scid].rev() in ancestors:
627 627 pushop.outbookmarks.append((b, dcid, scid))
628 628     # search for added bookmarks
629 629 for b, scid, dcid in addsrc:
630 630 if b in explicit:
631 631 explicit.remove(b)
632 632 pushop.outbookmarks.append((b, '', scid))
633 633     # search for overwritten bookmarks
634 634 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
635 635 if b in explicit:
636 636 explicit.remove(b)
637 637 pushop.outbookmarks.append((b, dcid, scid))
638 638     # search for bookmarks to delete
639 639 for b, scid, dcid in adddst:
640 640 if b in explicit:
641 641 explicit.remove(b)
642 642 # treat as "deleted locally"
643 643 pushop.outbookmarks.append((b, dcid, ''))
644 644 # identical bookmarks shouldn't get reported
645 645 for b, scid, dcid in same:
646 646 if b in explicit:
647 647 explicit.remove(b)
648 648
649 649 if explicit:
650 650 explicit = sorted(explicit)
651 651 # we should probably list all of them
652 652 ui.warn(_('bookmark %s does not exist on the local '
653 653 'or remote repository!\n') % explicit[0])
654 654 pushop.bkresult = 2
655 655
656 656 pushop.outbookmarks.sort()
657 657
658 658 def _pushcheckoutgoing(pushop):
659 659 outgoing = pushop.outgoing
660 660 unfi = pushop.repo.unfiltered()
661 661 if not outgoing.missing:
662 662 # nothing to push
663 663 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
664 664 return False
665 665 # something to push
666 666 if not pushop.force:
667 667         # if repo.obsstore == False --> no obsolete markers,
668 668         # so we can save the iteration
669 669         if unfi.obsstore:
670 670             # these messages are defined here for 80-char limit reasons
671 671 mso = _("push includes obsolete changeset: %s!")
672 672 mspd = _("push includes phase-divergent changeset: %s!")
673 673 mscd = _("push includes content-divergent changeset: %s!")
674 674 mst = {"orphan": _("push includes orphan changeset: %s!"),
675 675 "phase-divergent": mspd,
676 676 "content-divergent": mscd}
677 677             # If we are pushing and there is at least one
678 678             # obsolete or unstable changeset in missing, at
679 679             # least one of the missingheads will be obsolete or
680 680             # unstable. So checking heads only is ok
681 681 for node in outgoing.missingheads:
682 682 ctx = unfi[node]
683 683 if ctx.obsolete():
684 684 raise error.Abort(mso % ctx)
685 685 elif ctx.isunstable():
686 686 # TODO print more than one instability in the abort
687 687 # message
688 688 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
689 689
690 690 discovery.checkheads(pushop)
691 691 return True
692 692
693 693 # List of names of steps to perform for an outgoing bundle2, order matters.
694 694 b2partsgenorder = []
695 695
696 696 # Mapping between step name and function
697 697 #
698 698 # This exists to help extensions wrap steps if necessary
699 699 b2partsgenmapping = {}
700 700
701 701 def b2partsgenerator(stepname, idx=None):
702 702 """decorator for function generating bundle2 part
703 703
704 704 The function is added to the step -> function mapping and appended to the
705 705 list of steps. Beware that decorated functions will be added in order
706 706 (this may matter).
707 707
708 708     You can only use this decorator for new steps; if you want to wrap a step
709 709     from an extension, change the b2partsgenmapping dictionary directly."""
710 710 def dec(func):
711 711 assert stepname not in b2partsgenmapping
712 712 b2partsgenmapping[stepname] = func
713 713 if idx is None:
714 714 b2partsgenorder.append(stepname)
715 715 else:
716 716 b2partsgenorder.insert(idx, stepname)
717 717 return func
718 718 return dec
719 719
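# A sketch of the idx parameter on a hypothetical generator; idx=0 places
# the step at the front of b2partsgenorder instead of appending it, as the
# 'pushvars' generator further below does:
#
#   @b2partsgenerator('my-part', idx=0)
#   def _pushb2mypart(pushop, bundler):
#       return None  # no part generated; a callable return is a reply handler
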
720 720 def _pushb2ctxcheckheads(pushop, bundler):
721 721 """Generate race condition checking parts
722 722
723 723 Exists as an independent function to aid extensions
724 724 """
725 725     # * 'force' does not check for push races,
726 726     # * if we don't push anything, there is nothing to check.
727 727 if not pushop.force and pushop.outgoing.missingheads:
728 728 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
729 729 emptyremote = pushop.pushbranchmap is None
730 730 if not allowunrelated or emptyremote:
731 731 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
732 732 else:
733 733 affected = set()
734 734 for branch, heads in pushop.pushbranchmap.iteritems():
735 735 remoteheads, newheads, unsyncedheads, discardedheads = heads
736 736 if remoteheads is not None:
737 737 remote = set(remoteheads)
738 738 affected |= set(discardedheads) & remote
739 739 affected |= remote - set(newheads)
740 740 if affected:
741 741 data = iter(sorted(affected))
742 742 bundler.newpart('check:updated-heads', data=data)
743 743
744 744 def _pushing(pushop):
745 745 """return True if we are pushing anything"""
746 746 return bool(pushop.outgoing.missing
747 747 or pushop.outdatedphases
748 748 or pushop.outobsmarkers
749 749 or pushop.outbookmarks)
750 750
751 751 @b2partsgenerator('check-bookmarks')
752 752 def _pushb2checkbookmarks(pushop, bundler):
753 753 """insert bookmark move checking"""
754 754 if not _pushing(pushop) or pushop.force:
755 755 return
756 756 b2caps = bundle2.bundle2caps(pushop.remote)
757 757 hasbookmarkcheck = 'bookmarks' in b2caps
758 758 if not (pushop.outbookmarks and hasbookmarkcheck):
759 759 return
760 760 data = []
761 761 for book, old, new in pushop.outbookmarks:
762 762 old = bin(old)
763 763 data.append((book, old))
764 764 checkdata = bookmod.binaryencode(data)
765 765 bundler.newpart('check:bookmarks', data=checkdata)
766 766
767 767 @b2partsgenerator('check-phases')
768 768 def _pushb2checkphases(pushop, bundler):
769 769 """insert phase move checking"""
770 770 if not _pushing(pushop) or pushop.force:
771 771 return
772 772 b2caps = bundle2.bundle2caps(pushop.remote)
773 773 hasphaseheads = 'heads' in b2caps.get('phases', ())
774 774 if pushop.remotephases is not None and hasphaseheads:
775 775 # check that the remote phase has not changed
776 776 checks = [[] for p in phases.allphases]
777 777 checks[phases.public].extend(pushop.remotephases.publicheads)
778 778 checks[phases.draft].extend(pushop.remotephases.draftroots)
779 779 if any(checks):
780 780 for nodes in checks:
781 781 nodes.sort()
782 782 checkdata = phases.binaryencode(checks)
783 783 bundler.newpart('check:phases', data=checkdata)
784 784
785 785 @b2partsgenerator('changeset')
786 786 def _pushb2ctx(pushop, bundler):
787 787 """handle changegroup push through bundle2
788 788
789 789 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
790 790 """
791 791 if 'changesets' in pushop.stepsdone:
792 792 return
793 793 pushop.stepsdone.add('changesets')
794 794 # Send known heads to the server for race detection.
795 795 if not _pushcheckoutgoing(pushop):
796 796 return
797 797 pushop.repo.prepushoutgoinghooks(pushop)
798 798
799 799 _pushb2ctxcheckheads(pushop, bundler)
800 800
801 801 b2caps = bundle2.bundle2caps(pushop.remote)
802 802 version = '01'
803 803 cgversions = b2caps.get('changegroup')
804 804 if cgversions: # 3.1 and 3.2 ship with an empty value
805 805 cgversions = [v for v in cgversions
806 806 if v in changegroup.supportedoutgoingversions(
807 807 pushop.repo)]
808 808 if not cgversions:
809 809 raise ValueError(_('no common changegroup version'))
810 810 version = max(cgversions)
811 811 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
812 812 'push')
813 813 cgpart = bundler.newpart('changegroup', data=cgstream)
814 814 if cgversions:
815 815 cgpart.addparam('version', version)
816 816 if 'treemanifest' in pushop.repo.requirements:
817 817 cgpart.addparam('treemanifest', '1')
818 818 def handlereply(op):
819 819 """extract addchangegroup returns from server reply"""
820 820 cgreplies = op.records.getreplies(cgpart.id)
821 821 assert len(cgreplies['changegroup']) == 1
822 822 pushop.cgresult = cgreplies['changegroup'][0]['return']
823 823 return handlereply
824 824
825 825 @b2partsgenerator('phase')
826 826 def _pushb2phases(pushop, bundler):
827 827 """handle phase push through bundle2"""
828 828 if 'phases' in pushop.stepsdone:
829 829 return
830 830 b2caps = bundle2.bundle2caps(pushop.remote)
831 831 ui = pushop.repo.ui
832 832
833 833 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
834 834 haspushkey = 'pushkey' in b2caps
835 835 hasphaseheads = 'heads' in b2caps.get('phases', ())
836 836
837 837 if hasphaseheads and not legacyphase:
838 838 return _pushb2phaseheads(pushop, bundler)
839 839 elif haspushkey:
840 840 return _pushb2phasespushkey(pushop, bundler)
841 841
842 842 def _pushb2phaseheads(pushop, bundler):
843 843 """push phase information through a bundle2 - binary part"""
844 844 pushop.stepsdone.add('phases')
845 845 if pushop.outdatedphases:
846 846 updates = [[] for p in phases.allphases]
847 847 updates[0].extend(h.node() for h in pushop.outdatedphases)
848 848 phasedata = phases.binaryencode(updates)
849 849 bundler.newpart('phase-heads', data=phasedata)
850 850
851 851 def _pushb2phasespushkey(pushop, bundler):
852 852 """push phase information through a bundle2 - pushkey part"""
853 853 pushop.stepsdone.add('phases')
854 854 part2node = []
855 855
856 856 def handlefailure(pushop, exc):
857 857 targetid = int(exc.partid)
858 858 for partid, node in part2node:
859 859 if partid == targetid:
860 860 raise error.Abort(_('updating %s to public failed') % node)
861 861
862 862 enc = pushkey.encode
863 863 for newremotehead in pushop.outdatedphases:
864 864 part = bundler.newpart('pushkey')
865 865 part.addparam('namespace', enc('phases'))
866 866 part.addparam('key', enc(newremotehead.hex()))
867 867 part.addparam('old', enc('%d' % phases.draft))
868 868 part.addparam('new', enc('%d' % phases.public))
869 869 part2node.append((part.id, newremotehead))
870 870 pushop.pkfailcb[part.id] = handlefailure
871 871
872 872 def handlereply(op):
873 873 for partid, node in part2node:
874 874 partrep = op.records.getreplies(partid)
875 875 results = partrep['pushkey']
876 876 assert len(results) <= 1
877 877 msg = None
878 878 if not results:
879 879 msg = _('server ignored update of %s to public!\n') % node
880 880 elif not int(results[0]['return']):
881 881 msg = _('updating %s to public failed!\n') % node
882 882 if msg is not None:
883 883 pushop.ui.warn(msg)
884 884 return handlereply
885 885
886 886 @b2partsgenerator('obsmarkers')
887 887 def _pushb2obsmarkers(pushop, bundler):
888 888 if 'obsmarkers' in pushop.stepsdone:
889 889 return
890 890 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
891 891 if obsolete.commonversion(remoteversions) is None:
892 892 return
893 893 pushop.stepsdone.add('obsmarkers')
894 894 if pushop.outobsmarkers:
895 895 markers = sorted(pushop.outobsmarkers)
896 896 bundle2.buildobsmarkerspart(bundler, markers)
897 897
898 898 @b2partsgenerator('bookmarks')
899 899 def _pushb2bookmarks(pushop, bundler):
900 900 """handle bookmark push through bundle2"""
901 901 if 'bookmarks' in pushop.stepsdone:
902 902 return
903 903 b2caps = bundle2.bundle2caps(pushop.remote)
904 904
905 905 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
906 906 legacybooks = 'bookmarks' in legacy
907 907
908 908 if not legacybooks and 'bookmarks' in b2caps:
909 909 return _pushb2bookmarkspart(pushop, bundler)
910 910 elif 'pushkey' in b2caps:
911 911 return _pushb2bookmarkspushkey(pushop, bundler)
912 912
913 913 def _bmaction(old, new):
914 914 """small utility for bookmark pushing"""
915 915 if not old:
916 916 return 'export'
917 917 elif not new:
918 918 return 'delete'
919 919 return 'update'
920 920
921 921 def _pushb2bookmarkspart(pushop, bundler):
922 922 pushop.stepsdone.add('bookmarks')
923 923 if not pushop.outbookmarks:
924 924 return
925 925
926 926 allactions = []
927 927 data = []
928 928 for book, old, new in pushop.outbookmarks:
929 929 new = bin(new)
930 930 data.append((book, new))
931 931 allactions.append((book, _bmaction(old, new)))
932 932 checkdata = bookmod.binaryencode(data)
933 933 bundler.newpart('bookmarks', data=checkdata)
934 934
935 935 def handlereply(op):
936 936 ui = pushop.ui
937 937 # if success
938 938 for book, action in allactions:
939 939 ui.status(bookmsgmap[action][0] % book)
940 940
941 941 return handlereply
942 942
943 943 def _pushb2bookmarkspushkey(pushop, bundler):
944 944 pushop.stepsdone.add('bookmarks')
945 945 part2book = []
946 946 enc = pushkey.encode
947 947
948 948 def handlefailure(pushop, exc):
949 949 targetid = int(exc.partid)
950 950 for partid, book, action in part2book:
951 951 if partid == targetid:
952 952 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
953 953             # we should not be called for a part we did not generate
954 954 assert False
955 955
956 956 for book, old, new in pushop.outbookmarks:
957 957 part = bundler.newpart('pushkey')
958 958 part.addparam('namespace', enc('bookmarks'))
959 959 part.addparam('key', enc(book))
960 960 part.addparam('old', enc(old))
961 961 part.addparam('new', enc(new))
962 962 action = 'update'
963 963 if not old:
964 964 action = 'export'
965 965 elif not new:
966 966 action = 'delete'
967 967 part2book.append((part.id, book, action))
968 968 pushop.pkfailcb[part.id] = handlefailure
969 969
970 970 def handlereply(op):
971 971 ui = pushop.ui
972 972 for partid, book, action in part2book:
973 973 partrep = op.records.getreplies(partid)
974 974 results = partrep['pushkey']
975 975 assert len(results) <= 1
976 976 if not results:
977 977 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
978 978 else:
979 979 ret = int(results[0]['return'])
980 980 if ret:
981 981 ui.status(bookmsgmap[action][0] % book)
982 982 else:
983 983 ui.warn(bookmsgmap[action][1] % book)
984 984 if pushop.bkresult is not None:
985 985 pushop.bkresult = 1
986 986 return handlereply
987 987
988 988 @b2partsgenerator('pushvars', idx=0)
989 989 def _getbundlesendvars(pushop, bundler):
990 990 '''send shellvars via bundle2'''
991 991 pushvars = pushop.pushvars
992 992 if pushvars:
993 993 shellvars = {}
994 994 for raw in pushvars:
995 995 if '=' not in raw:
996 996 msg = ("unable to parse variable '%s', should follow "
997 997 "'KEY=VALUE' or 'KEY=' format")
998 998 raise error.Abort(msg % raw)
999 999 k, v = raw.split('=', 1)
1000 1000 shellvars[k] = v
1001 1001
1002 1002 part = bundler.newpart('pushvars')
1003 1003
1004 1004 for key, value in shellvars.iteritems():
1005 1005 part.addparam(key, value, mandatory=False)
1006 1006
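# The KEY=VALUE strings parsed above are typically supplied on the command
# line through the experimental `--pushvars` option:
#
#   hg push --pushvars "DEBUG=1" --pushvars "REASON=hotfix"
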
1007 1007 def _pushbundle2(pushop):
1008 1008 """push data to the remote using bundle2
1009 1009
1010 1010 The only currently supported type of data is changegroup but this will
1011 1011 evolve in the future."""
1012 1012 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1013 1013 pushback = (pushop.trmanager
1014 1014 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1015 1015
1016 1016 # create reply capability
1017 1017 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1018 1018 allowpushback=pushback))
1019 1019 bundler.newpart('replycaps', data=capsblob)
1020 1020 replyhandlers = []
1021 1021 for partgenname in b2partsgenorder:
1022 1022 partgen = b2partsgenmapping[partgenname]
1023 1023 ret = partgen(pushop, bundler)
1024 1024 if callable(ret):
1025 1025 replyhandlers.append(ret)
1026 1026 # do not push if nothing to push
1027 1027 if bundler.nbparts <= 1:
1028 1028 return
1029 1029 stream = util.chunkbuffer(bundler.getchunks())
1030 1030 try:
1031 1031 try:
1032 1032 reply = pushop.remote.unbundle(
1033 1033 stream, ['force'], pushop.remote.url())
1034 1034 except error.BundleValueError as exc:
1035 1035 raise error.Abort(_('missing support for %s') % exc)
1036 1036 try:
1037 1037 trgetter = None
1038 1038 if pushback:
1039 1039 trgetter = pushop.trmanager.transaction
1040 1040 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1041 1041 except error.BundleValueError as exc:
1042 1042 raise error.Abort(_('missing support for %s') % exc)
1043 1043 except bundle2.AbortFromPart as exc:
1044 1044 pushop.ui.status(_('remote: %s\n') % exc)
1045 1045 if exc.hint is not None:
1046 1046 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1047 1047 raise error.Abort(_('push failed on remote'))
1048 1048 except error.PushkeyFailed as exc:
1049 1049 partid = int(exc.partid)
1050 1050 if partid not in pushop.pkfailcb:
1051 1051 raise
1052 1052 pushop.pkfailcb[partid](pushop, exc)
1053 1053 for rephand in replyhandlers:
1054 1054 rephand(op)
1055 1055
1056 1056 def _pushchangeset(pushop):
1057 1057 """Make the actual push of changeset bundle to remote repo"""
1058 1058 if 'changesets' in pushop.stepsdone:
1059 1059 return
1060 1060 pushop.stepsdone.add('changesets')
1061 1061 if not _pushcheckoutgoing(pushop):
1062 1062 return
1063 1063
1064 1064 # Should have verified this in push().
1065 1065 assert pushop.remote.capable('unbundle')
1066 1066
1067 1067 pushop.repo.prepushoutgoinghooks(pushop)
1068 1068 outgoing = pushop.outgoing
1069 1069 # TODO: get bundlecaps from remote
1070 1070 bundlecaps = None
1071 1071 # create a changegroup from local
1072 1072 if pushop.revs is None and not (outgoing.excluded
1073 1073 or pushop.repo.changelog.filteredrevs):
1074 1074 # push everything,
1075 1075 # use the fast path, no race possible on push
1076 1076 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1077 1077 fastpath=True, bundlecaps=bundlecaps)
1078 1078 else:
1079 1079 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1080 1080 'push', bundlecaps=bundlecaps)
1081 1081
1082 1082 # apply changegroup to remote
1083 1083 # local repo finds heads on server, finds out what
1084 1084 # revs it must push. once revs transferred, if server
1085 1085 # finds it has different heads (someone else won
1086 1086 # commit/push race), server aborts.
1087 1087 if pushop.force:
1088 1088 remoteheads = ['force']
1089 1089 else:
1090 1090 remoteheads = pushop.remoteheads
1091 1091 # ssh: return remote's addchangegroup()
1092 1092 # http: return remote's addchangegroup() or 0 for error
1093 1093 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1094 1094 pushop.repo.url())
1095 1095
1096 1096 def _pushsyncphase(pushop):
1097 1097 """synchronise phase information locally and remotely"""
1098 1098 cheads = pushop.commonheads
1099 1099 # even when we don't push, exchanging phase data is useful
1100 1100 remotephases = pushop.remote.listkeys('phases')
1101 1101 if (pushop.ui.configbool('ui', '_usedassubrepo')
1102 1102 and remotephases # server supports phases
1103 1103 and pushop.cgresult is None # nothing was pushed
1104 1104 and remotephases.get('publishing', False)):
1105 1105 # When:
1106 1106 # - this is a subrepo push
1107 1107         # - and the remote supports phases
1108 1108         # - and no changesets were pushed
1109 1109         # - and the remote is publishing
1110 1110         # We may be in the issue 3871 case!
1111 1111         # We drop the phase synchronisation usually done as a courtesy,
1112 1112         # since it could publish changesets that are still draft
1113 1113         # locally.
1114 1114 remotephases = {'publishing': 'True'}
1115 1115     if not remotephases: # old server, or public-only reply from non-publishing
1116 1116 _localphasemove(pushop, cheads)
1117 1117 # don't push any phase data as there is nothing to push
1118 1118 else:
1119 1119 ana = phases.analyzeremotephases(pushop.repo, cheads,
1120 1120 remotephases)
1121 1121 pheads, droots = ana
1122 1122 ### Apply remote phase on local
1123 1123 if remotephases.get('publishing', False):
1124 1124 _localphasemove(pushop, cheads)
1125 1125 else: # publish = False
1126 1126 _localphasemove(pushop, pheads)
1127 1127 _localphasemove(pushop, cheads, phases.draft)
1128 1128 ### Apply local phase on remote
1129 1129
1130 1130 if pushop.cgresult:
1131 1131 if 'phases' in pushop.stepsdone:
1132 1132             # phases already pushed through bundle2
1133 1133 return
1134 1134 outdated = pushop.outdatedphases
1135 1135 else:
1136 1136 outdated = pushop.fallbackoutdatedphases
1137 1137
1138 1138 pushop.stepsdone.add('phases')
1139 1139
1140 1140 # filter heads already turned public by the push
1141 1141 outdated = [c for c in outdated if c.node() not in pheads]
1142 1142 # fallback to independent pushkey command
1143 1143 for newremotehead in outdated:
1144 1144 r = pushop.remote.pushkey('phases',
1145 1145 newremotehead.hex(),
1146 1146 str(phases.draft),
1147 1147 str(phases.public))
1148 1148 if not r:
1149 1149 pushop.ui.warn(_('updating %s to public failed!\n')
1150 1150 % newremotehead)
1151 1151
1152 1152 def _localphasemove(pushop, nodes, phase=phases.public):
1153 1153 """move <nodes> to <phase> in the local source repo"""
1154 1154 if pushop.trmanager:
1155 1155 phases.advanceboundary(pushop.repo,
1156 1156 pushop.trmanager.transaction(),
1157 1157 phase,
1158 1158 nodes)
1159 1159 else:
1160 1160 # repo is not locked, do not change any phases!
1161 1161         # Inform the user that phases should have been moved when
1162 1162         # applicable.
1163 1163 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1164 1164 phasestr = phases.phasenames[phase]
1165 1165 if actualmoves:
1166 1166 pushop.ui.status(_('cannot lock source repo, skipping '
1167 1167 'local %s phase update\n') % phasestr)
1168 1168
1169 1169 def _pushobsolete(pushop):
1170 1170 """utility function to push obsolete markers to a remote"""
1171 1171 if 'obsmarkers' in pushop.stepsdone:
1172 1172 return
1173 1173 repo = pushop.repo
1174 1174 remote = pushop.remote
1175 1175 pushop.stepsdone.add('obsmarkers')
1176 1176 if pushop.outobsmarkers:
1177 1177 pushop.ui.debug('try to push obsolete markers to remote\n')
1178 1178 rslts = []
1179 1179 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1180 1180 for key in sorted(remotedata, reverse=True):
1181 1181 # reverse sort to ensure we end with dump0
1182 1182 data = remotedata[key]
1183 1183 rslts.append(remote.pushkey('obsolete', key, '', data))
1184 1184 if [r for r in rslts if not r]:
1185 1185 msg = _('failed to push some obsolete markers!\n')
1186 1186 repo.ui.warn(msg)
1187 1187
1188 1188 def _pushbookmark(pushop):
1189 1189 """Update bookmark position on remote"""
1190 1190 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1191 1191 return
1192 1192 pushop.stepsdone.add('bookmarks')
1193 1193 ui = pushop.ui
1194 1194 remote = pushop.remote
1195 1195
1196 1196 for b, old, new in pushop.outbookmarks:
1197 1197 action = 'update'
1198 1198 if not old:
1199 1199 action = 'export'
1200 1200 elif not new:
1201 1201 action = 'delete'
1202 1202 if remote.pushkey('bookmarks', b, old, new):
1203 1203 ui.status(bookmsgmap[action][0] % b)
1204 1204 else:
1205 1205 ui.warn(bookmsgmap[action][1] % b)
1206 1206     # discovery can have set the value from an invalid entry
1207 1207 if pushop.bkresult is not None:
1208 1208 pushop.bkresult = 1
1209 1209
1210 1210 class pulloperation(object):
1211 1211     """An object that represents a single pull operation
1212 1212 
1213 1213     Its purpose is to carry pull-related state and very common operations.
1214 1214 
1215 1215     A new one should be created at the beginning of each pull and discarded
1216 1216     afterward.
1217 1217 """
1218 1218
1219 1219 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1220 1220 remotebookmarks=None, streamclonerequested=None):
1221 1221 # repo we pull into
1222 1222 self.repo = repo
1223 1223 # repo we pull from
1224 1224 self.remote = remote
1225 1225         # revisions we try to pull (None is "all")
1226 1226         self.heads = heads
1227 1227         # bookmarks pulled explicitly
1228 1228 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1229 1229 for bookmark in bookmarks]
1230 1230 # do we force pull?
1231 1231 self.force = force
1232 1232 # whether a streaming clone was requested
1233 1233 self.streamclonerequested = streamclonerequested
1234 1234 # transaction manager
1235 1235 self.trmanager = None
1236 1236         # set of common changesets between local and remote before pull
1237 1237         self.common = None
1238 1238         # set of pulled heads
1239 1239         self.rheads = None
1240 1240         # list of missing changesets to fetch remotely
1241 1241 self.fetch = None
1242 1242 # remote bookmarks data
1243 1243 self.remotebookmarks = remotebookmarks
1244 1244 # result of changegroup pulling (used as return code by pull)
1245 1245 self.cgresult = None
1246 1246         # list of steps already done
1247 1247 self.stepsdone = set()
1248 1248 # Whether we attempted a clone from pre-generated bundles.
1249 1249 self.clonebundleattempted = False
1250 1250
1251 1251 @util.propertycache
1252 1252 def pulledsubset(self):
1253 1253         """heads of the set of changesets targeted by the pull"""
1254 1254 # compute target subset
1255 1255 if self.heads is None:
1256 1256             # We pulled everything possible
1257 1257 # sync on everything common
1258 1258 c = set(self.common)
1259 1259 ret = list(self.common)
1260 1260 for n in self.rheads:
1261 1261 if n not in c:
1262 1262 ret.append(n)
1263 1263 return ret
1264 1264 else:
1265 1265 # We pulled a specific subset
1266 1266 # sync on this subset
1267 1267 return self.heads
1268 1268
1269 1269 @util.propertycache
1270 1270 def canusebundle2(self):
1271 1271 return not _forcebundle1(self)
1272 1272
1273 1273 @util.propertycache
1274 1274 def remotebundle2caps(self):
1275 1275 return bundle2.bundle2caps(self.remote)
1276 1276
1277 1277 def gettransaction(self):
1278 1278 # deprecated; talk to trmanager directly
1279 1279 return self.trmanager.transaction()
1280 1280
1281 1281 class transactionmanager(util.transactional):
1282 1282 """An object to manage the life cycle of a transaction
1283 1283
1284 1284 It creates the transaction on demand and calls the appropriate hooks when
1285 1285 closing the transaction."""
1286 1286 def __init__(self, repo, source, url):
1287 1287 self.repo = repo
1288 1288 self.source = source
1289 1289 self.url = url
1290 1290 self._tr = None
1291 1291
1292 1292 def transaction(self):
1293 1293 """Return an open transaction object, constructing if necessary"""
1294 1294 if not self._tr:
1295 1295 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1296 1296 self._tr = self.repo.transaction(trname)
1297 1297 self._tr.hookargs['source'] = self.source
1298 1298 self._tr.hookargs['url'] = self.url
1299 1299 return self._tr
1300 1300
1301 1301 def close(self):
1302 1302 """close transaction if created"""
1303 1303 if self._tr is not None:
1304 1304 self._tr.close()
1305 1305
1306 1306 def release(self):
1307 1307 """release transaction if created"""
1308 1308 if self._tr is not None:
1309 1309 self._tr.release()
1310 1310
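# A minimal usage sketch; util.transactional supplies the context-manager
# protocol (the push code above relies on it), and the transaction itself
# is only created on the first transaction() call:
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   with trmanager:
#       tr = trmanager.transaction()  # created on demand, hook args set
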
1311 1311 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1312 1312 streamclonerequested=None):
1313 1313 """Fetch repository data from a remote.
1314 1314
1315 1315 This is the main function used to retrieve data from a remote repository.
1316 1316
1317 1317 ``repo`` is the local repository to clone into.
1318 1318 ``remote`` is a peer instance.
1319 1319 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1320 1320 default) means to pull everything from the remote.
1321 1321     ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1322 1322 default, all remote bookmarks are pulled.
1323 1323 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1324 1324 initialization.
1325 1325 ``streamclonerequested`` is a boolean indicating whether a "streaming
1326 1326 clone" is requested. A "streaming clone" is essentially a raw file copy
1327 1327 of revlogs from the server. This only works when the local repository is
1328 1328 empty. The default value of ``None`` means to respect the server
1329 1329 configuration for preferring stream clones.
1330 1330
1331 1331 Returns the ``pulloperation`` created for this pull.
1332 1332 """
1333 1333 if opargs is None:
1334 1334 opargs = {}
1335 1335 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1336 1336 streamclonerequested=streamclonerequested, **opargs)
1337 1337
1338 1338 peerlocal = pullop.remote.local()
1339 1339 if peerlocal:
1340 1340 missing = set(peerlocal.requirements) - pullop.repo.supported
1341 1341 if missing:
1342 1342 msg = _("required features are not"
1343 1343 " supported in the destination:"
1344 1344 " %s") % (', '.join(sorted(missing)))
1345 1345 raise error.Abort(msg)
1346 1346
1347 1347 wlock = lock = None
1348 1348 try:
1349 1349 wlock = pullop.repo.wlock()
1350 1350 lock = pullop.repo.lock()
1351 1351 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1352 1352 # This should ideally be in _pullbundle2(). However, it needs to run
1353 1353 # before discovery to avoid extra work.
1354 1354 _maybeapplyclonebundle(pullop)
1355 1355 streamclone.maybeperformlegacystreamclone(pullop)
1356 1356 _pulldiscovery(pullop)
1357 1357 if pullop.canusebundle2:
1358 1358 _pullbundle2(pullop)
1359 1359 _pullchangeset(pullop)
1360 1360 _pullphase(pullop)
1361 1361 _pullbookmarks(pullop)
1362 1362 _pullobsolete(pullop)
1363 1363 pullop.trmanager.close()
1364 1364 finally:
1365 1365 lockmod.release(pullop.trmanager, lock, wlock)
1366 1366
1367 1367 # storing remotenames
1368 1368 if repo.ui.configbool('experimental', 'remotenames'):
1369 remotenames.pullremotenames(repo, remote)
1369 logexchange.pullremotenames(repo, remote)
1370 1370
1371 1371 return pullop
1372 1372
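# A minimal usage sketch, assuming ``other`` is a peer; heads=None pulls
# everything, mirroring the defaults documented above:
#
#   pullop = pull(repo, other, heads=None, force=False)
#   if pullop.cgresult == 0:
#       pass  # no changes found
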
1373 1373 # list of steps to perform discovery before pull
1374 1374 pulldiscoveryorder = []
1375 1375
1376 1376 # Mapping between step name and function
1377 1377 #
1378 1378 # This exists to help extensions wrap steps if necessary
1379 1379 pulldiscoverymapping = {}
1380 1380
1381 1381 def pulldiscovery(stepname):
1382 1382 """decorator for function performing discovery before pull
1383 1383
1384 1384 The function is added to the step -> function mapping and appended to the
1385 1385     list of steps. Beware that decorated functions will be added in order (this
1386 1386     may matter).
1387 1387 
1388 1388     You can only use this decorator for a new step; if you want to wrap a step
1389 1389     from an extension, change the pulldiscoverymapping dictionary directly."""
1390 1390 def dec(func):
1391 1391 assert stepname not in pulldiscoverymapping
1392 1392 pulldiscoverymapping[stepname] = func
1393 1393 pulldiscoveryorder.append(stepname)
1394 1394 return func
1395 1395 return dec
1396 1396
1397 1397 def _pulldiscovery(pullop):
1398 1398 """Run all discovery steps"""
1399 1399 for stepname in pulldiscoveryorder:
1400 1400 step = pulldiscoverymapping[stepname]
1401 1401 step(pullop)
1402 1402
1403 1403 @pulldiscovery('b1:bookmarks')
1404 1404 def _pullbookmarkbundle1(pullop):
1405 1405 """fetch bookmark data in bundle1 case
1406 1406
1407 1407 If not using bundle2, we have to fetch bookmarks before changeset
1408 1408 discovery to reduce the chance and impact of race conditions."""
1409 1409 if pullop.remotebookmarks is not None:
1410 1410 return
1411 1411 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1412 1412         # all known bundle2 servers now support listkeys, but let's be nice with
1413 1413         # new implementations.
1414 1414 return
1415 1415 books = pullop.remote.listkeys('bookmarks')
1416 1416 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1417 1417
1418 1418
1419 1419 @pulldiscovery('changegroup')
1420 1420 def _pulldiscoverychangegroup(pullop):
1421 1421 """discovery phase for the pull
1422 1422
1423 1423     Currently handles changeset discovery only; will change to handle all
1424 1424     discovery at some point."""
1425 1425 tmp = discovery.findcommonincoming(pullop.repo,
1426 1426 pullop.remote,
1427 1427 heads=pullop.heads,
1428 1428 force=pullop.force)
1429 1429 common, fetch, rheads = tmp
1430 1430 nm = pullop.repo.unfiltered().changelog.nodemap
1431 1431 if fetch and rheads:
1432 1432         # If a remote head is filtered locally, put it back in common.
1433 1433         #
1434 1434         # This is a hackish solution to catch most "common but locally
1435 1435         # hidden" situations. We do not perform discovery on the unfiltered
1436 1436         # repository because it ends up doing a pathological amount of round
1437 1437         # trips for a huge number of changesets we do not care about.
1438 1438         #
1439 1439         # If a set of such "common but filtered" changesets exists on the server
1440 1440         # but does not include a remote head, we'll not be able to detect it,
1441 1441 scommon = set(common)
1442 1442 for n in rheads:
1443 1443 if n in nm:
1444 1444 if n not in scommon:
1445 1445 common.append(n)
1446 1446 if set(rheads).issubset(set(common)):
1447 1447 fetch = []
1448 1448 pullop.common = common
1449 1449 pullop.fetch = fetch
1450 1450 pullop.rheads = rheads
1451 1451
1452 1452 def _pullbundle2(pullop):
1453 1453 """pull data using bundle2
1454 1454
1455 1455     For now, the only supported data is the changegroup."""
1456 1456 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1457 1457
1458 1458 # At the moment we don't do stream clones over bundle2. If that is
1459 1459 # implemented then here's where the check for that will go.
1460 1460 streaming = False
1461 1461
1462 1462 # pulling changegroup
1463 1463 pullop.stepsdone.add('changegroup')
1464 1464
1465 1465 kwargs['common'] = pullop.common
1466 1466 kwargs['heads'] = pullop.heads or pullop.rheads
1467 1467 kwargs['cg'] = pullop.fetch
1468 1468
1469 1469 ui = pullop.repo.ui
1470 1470 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1471 1471 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1472 1472 if (not legacyphase and hasbinaryphase):
1473 1473 kwargs['phases'] = True
1474 1474 pullop.stepsdone.add('phases')
1475 1475
1476 1476 bookmarksrequested = False
1477 1477 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1478 1478 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1479 1479
1480 1480 if pullop.remotebookmarks is not None:
1481 1481 pullop.stepsdone.add('request-bookmarks')
1482 1482
1483 1483 if ('request-bookmarks' not in pullop.stepsdone
1484 1484 and pullop.remotebookmarks is None
1485 1485 and not legacybookmark and hasbinarybook):
1486 1486 kwargs['bookmarks'] = True
1487 1487 bookmarksrequested = True
1488 1488
1489 1489 if 'listkeys' in pullop.remotebundle2caps:
1490 1490 if 'phases' not in pullop.stepsdone:
1491 1491 kwargs['listkeys'] = ['phases']
1492 1492 if 'request-bookmarks' not in pullop.stepsdone:
1493 1493             # make sure to always include bookmark data when migrating
1494 1494 # `hg incoming --bundle` to using this function.
1495 1495 pullop.stepsdone.add('request-bookmarks')
1496 1496 kwargs.setdefault('listkeys', []).append('bookmarks')
1497 1497
1498 1498 # If this is a full pull / clone and the server supports the clone bundles
1499 1499 # feature, tell the server whether we attempted a clone bundle. The
1500 1500 # presence of this flag indicates the client supports clone bundles. This
1501 1501 # will enable the server to treat clients that support clone bundles
1502 1502 # differently from those that don't.
1503 1503 if (pullop.remote.capable('clonebundles')
1504 1504 and pullop.heads is None and list(pullop.common) == [nullid]):
1505 1505 kwargs['cbattempted'] = pullop.clonebundleattempted
1506 1506
1507 1507 if streaming:
1508 1508 pullop.repo.ui.status(_('streaming all changes\n'))
1509 1509 elif not pullop.fetch:
1510 1510 pullop.repo.ui.status(_("no changes found\n"))
1511 1511 pullop.cgresult = 0
1512 1512 else:
1513 1513 if pullop.heads is None and list(pullop.common) == [nullid]:
1514 1514 pullop.repo.ui.status(_("requesting all changes\n"))
1515 1515 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1516 1516 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1517 1517 if obsolete.commonversion(remoteversions) is not None:
1518 1518 kwargs['obsmarkers'] = True
1519 1519 pullop.stepsdone.add('obsmarkers')
1520 1520 _pullbundle2extraprepare(pullop, kwargs)
1521 1521 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1522 1522 try:
1523 1523 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction)
1524 1524 op.modes['bookmarks'] = 'records'
1525 1525 bundle2.processbundle(pullop.repo, bundle, op=op)
1526 1526 except bundle2.AbortFromPart as exc:
1527 1527 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1528 1528 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1529 1529 except error.BundleValueError as exc:
1530 1530 raise error.Abort(_('missing support for %s') % exc)
1531 1531
1532 1532 if pullop.fetch:
1533 1533 pullop.cgresult = bundle2.combinechangegroupresults(op)
1534 1534
1535 1535     # processing phase changes
1536 1536 for namespace, value in op.records['listkeys']:
1537 1537 if namespace == 'phases':
1538 1538 _pullapplyphases(pullop, value)
1539 1539
1540 1540 # processing bookmark update
1541 1541 if bookmarksrequested:
1542 1542 books = {}
1543 1543 for record in op.records['bookmarks']:
1544 1544 books[record['bookmark']] = record["node"]
1545 1545 pullop.remotebookmarks = books
1546 1546 else:
1547 1547 for namespace, value in op.records['listkeys']:
1548 1548 if namespace == 'bookmarks':
1549 1549 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1550 1550
1551 1551 # bookmark data were either already there or pulled in the bundle
1552 1552 if pullop.remotebookmarks is not None:
1553 1553 _pullbookmarks(pullop)
1554 1554
1555 1555 def _pullbundle2extraprepare(pullop, kwargs):
1556 1556 """hook function so that extensions can extend the getbundle call"""
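# A hedged sketch of how an extension might use the hook above via
# extensions.wrapfunction (the standard Mercurial wrapping helper); the
# extra getbundle argument name 'example' is hypothetical.
#
#   from mercurial import exchange, extensions
#
#   def _extraprepare(orig, pullop, kwargs):
#       kwargs['example'] = True  # request a hypothetical extra part
#       return orig(pullop, kwargs)
#
#   extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                           _extraprepare)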
1557 1557
1558 1558 def _pullchangeset(pullop):
1559 1559 """pull changeset from unbundle into the local repo"""
1560 1560     # We delay the open of the transaction as late as possible so we
1561 1561     # don't open a transaction for nothing and don't break future useful
1562 1562     # rollback calls
1563 1563 if 'changegroup' in pullop.stepsdone:
1564 1564 return
1565 1565 pullop.stepsdone.add('changegroup')
1566 1566 if not pullop.fetch:
1567 1567 pullop.repo.ui.status(_("no changes found\n"))
1568 1568 pullop.cgresult = 0
1569 1569 return
1570 1570 tr = pullop.gettransaction()
1571 1571 if pullop.heads is None and list(pullop.common) == [nullid]:
1572 1572 pullop.repo.ui.status(_("requesting all changes\n"))
1573 1573 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1574 1574 # issue1320, avoid a race if remote changed after discovery
1575 1575 pullop.heads = pullop.rheads
1576 1576
1577 1577 if pullop.remote.capable('getbundle'):
1578 1578 # TODO: get bundlecaps from remote
1579 1579 cg = pullop.remote.getbundle('pull', common=pullop.common,
1580 1580 heads=pullop.heads or pullop.rheads)
1581 1581 elif pullop.heads is None:
1582 1582 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1583 1583 elif not pullop.remote.capable('changegroupsubset'):
1584 1584 raise error.Abort(_("partial pull cannot be done because "
1585 1585 "other repository doesn't support "
1586 1586 "changegroupsubset."))
1587 1587 else:
1588 1588 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1589 1589 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1590 1590 pullop.remote.url())
1591 1591 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1592 1592
1593 1593 def _pullphase(pullop):
1594 1594 # Get remote phases data from remote
1595 1595 if 'phases' in pullop.stepsdone:
1596 1596 return
1597 1597 remotephases = pullop.remote.listkeys('phases')
1598 1598 _pullapplyphases(pullop, remotephases)
1599 1599
1600 1600 def _pullapplyphases(pullop, remotephases):
1601 1601 """apply phase movement from observed remote state"""
1602 1602 if 'phases' in pullop.stepsdone:
1603 1603 return
1604 1604 pullop.stepsdone.add('phases')
1605 1605 publishing = bool(remotephases.get('publishing', False))
1606 1606 if remotephases and not publishing:
1607 1607 # remote is new and non-publishing
1608 1608 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1609 1609 pullop.pulledsubset,
1610 1610 remotephases)
1611 1611 dheads = pullop.pulledsubset
1612 1612 else:
1613 1613         # Remote is old or publishing; all common changesets
1614 1614         # should be seen as public
1615 1615 pheads = pullop.pulledsubset
1616 1616 dheads = []
1617 1617 unfi = pullop.repo.unfiltered()
1618 1618 phase = unfi._phasecache.phase
1619 1619 rev = unfi.changelog.nodemap.get
1620 1620 public = phases.public
1621 1621 draft = phases.draft
1622 1622
1623 1623 # exclude changesets already public locally and update the others
1624 1624 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1625 1625 if pheads:
1626 1626 tr = pullop.gettransaction()
1627 1627 phases.advanceboundary(pullop.repo, tr, public, pheads)
1628 1628
1629 1629 # exclude changesets already draft locally and update the others
1630 1630 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1631 1631 if dheads:
1632 1632 tr = pullop.gettransaction()
1633 1633 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1634 1634
1635 1635 def _pullbookmarks(pullop):
1636 1636 """process the remote bookmark information to update the local one"""
1637 1637 if 'bookmarks' in pullop.stepsdone:
1638 1638 return
1639 1639 pullop.stepsdone.add('bookmarks')
1640 1640 repo = pullop.repo
1641 1641 remotebookmarks = pullop.remotebookmarks
1642 1642 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1643 1643 pullop.remote.url(),
1644 1644 pullop.gettransaction,
1645 1645 explicit=pullop.explicitbookmarks)
1646 1646
1647 1647 def _pullobsolete(pullop):
1648 1648 """utility function to pull obsolete markers from a remote
1649 1649
1650 1650     `gettransaction` is a function that returns the pull transaction, creating
1651 1651     one if necessary. We return the transaction to inform the calling code that
1652 1652     a new transaction has been created (when applicable).
1653 1653 
1654 1654     Exists mostly to allow overriding for experimentation purposes."""
1655 1655 if 'obsmarkers' in pullop.stepsdone:
1656 1656 return
1657 1657 pullop.stepsdone.add('obsmarkers')
1658 1658 tr = None
1659 1659 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1660 1660 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1661 1661 remoteobs = pullop.remote.listkeys('obsolete')
1662 1662 if 'dump0' in remoteobs:
1663 1663 tr = pullop.gettransaction()
1664 1664 markers = []
1665 1665 for key in sorted(remoteobs, reverse=True):
1666 1666 if key.startswith('dump'):
1667 1667 data = util.b85decode(remoteobs[key])
1668 1668 version, newmarks = obsolete._readmarkers(data)
1669 1669 markers += newmarks
1670 1670 if markers:
1671 1671 pullop.repo.obsstore.add(tr, markers)
1672 1672 pullop.repo.invalidatevolatilesets()
1673 1673 return tr
1674 1674
1675 1675 def caps20to10(repo):
1676 1676 """return a set with appropriate options to use bundle20 during getbundle"""
1677 1677 caps = {'HG20'}
1678 1678 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1679 1679 caps.add('bundle2=' + urlreq.quote(capsblob))
1680 1680 return caps
1681 1681
1682 1682 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1683 1683 getbundle2partsorder = []
1684 1684
1685 1685 # Mapping between step name and function
1686 1686 #
1687 1687 # This exists to help extensions wrap steps if necessary
1688 1688 getbundle2partsmapping = {}
1689 1689
1690 1690 def getbundle2partsgenerator(stepname, idx=None):
1691 1691 """decorator for function generating bundle2 part for getbundle
1692 1692
1693 1693 The function is added to the step -> function mapping and appended to the
1694 1694 list of steps. Beware that decorated functions will be added in order
1695 1695 (this may matter).
1696 1696
1697 1697     You can only use this decorator for new steps; if you want to wrap a step
1698 1698     from an extension, change the getbundle2partsmapping dictionary directly."""
1699 1699 def dec(func):
1700 1700 assert stepname not in getbundle2partsmapping
1701 1701 getbundle2partsmapping[stepname] = func
1702 1702 if idx is None:
1703 1703 getbundle2partsorder.append(stepname)
1704 1704 else:
1705 1705 getbundle2partsorder.insert(idx, stepname)
1706 1706 return func
1707 1707 return dec
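# Illustrative sketch only: registering a new part generator with the
# decorator above, using ``idx`` to control its position in
# getbundle2partsorder. The part name 'example:noop' is hypothetical.
#
#   @getbundle2partsgenerator('example:noop', idx=0)
#   def _getbundleexamplepart(bundler, repo, source, bundlecaps=None,
#                             b2caps=None, **kwargs):
#       """generate nothing; a placeholder an extension could build upon"""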
1708 1708
1709 1709 def bundle2requested(bundlecaps):
1710 1710 if bundlecaps is not None:
1711 1711 return any(cap.startswith('HG2') for cap in bundlecaps)
1712 1712 return False
1713 1713
1714 1714 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1715 1715 **kwargs):
1716 1716 """Return chunks constituting a bundle's raw data.
1717 1717
1718 1718 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1719 1719 passed.
1720 1720
1721 1721 Returns an iterator over raw chunks (of varying sizes).
1722 1722 """
1723 1723 kwargs = pycompat.byteskwargs(kwargs)
1724 1724 usebundle2 = bundle2requested(bundlecaps)
1725 1725 # bundle10 case
1726 1726 if not usebundle2:
1727 1727 if bundlecaps and not kwargs.get('cg', True):
1728 1728 raise ValueError(_('request for bundle10 must include changegroup'))
1729 1729
1730 1730 if kwargs:
1731 1731 raise ValueError(_('unsupported getbundle arguments: %s')
1732 1732 % ', '.join(sorted(kwargs.keys())))
1733 1733 outgoing = _computeoutgoing(repo, heads, common)
1734 1734 return changegroup.makestream(repo, outgoing, '01', source,
1735 1735 bundlecaps=bundlecaps)
1736 1736
1737 1737 # bundle20 case
1738 1738 b2caps = {}
1739 1739 for bcaps in bundlecaps:
1740 1740 if bcaps.startswith('bundle2='):
1741 1741 blob = urlreq.unquote(bcaps[len('bundle2='):])
1742 1742 b2caps.update(bundle2.decodecaps(blob))
1743 1743 bundler = bundle2.bundle20(repo.ui, b2caps)
1744 1744
1745 1745 kwargs['heads'] = heads
1746 1746 kwargs['common'] = common
1747 1747
1748 1748 for name in getbundle2partsorder:
1749 1749 func = getbundle2partsmapping[name]
1750 1750 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1751 1751 **pycompat.strkwargs(kwargs))
1752 1752
1753 1753 return bundler.getchunks()
1754 1754
1755 1755 @getbundle2partsgenerator('changegroup')
1756 1756 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1757 1757 b2caps=None, heads=None, common=None, **kwargs):
1758 1758 """add a changegroup part to the requested bundle"""
1759 1759 cgstream = None
1760 1760 if kwargs.get('cg', True):
1761 1761 # build changegroup bundle here.
1762 1762 version = '01'
1763 1763 cgversions = b2caps.get('changegroup')
1764 1764 if cgversions: # 3.1 and 3.2 ship with an empty value
1765 1765 cgversions = [v for v in cgversions
1766 1766 if v in changegroup.supportedoutgoingversions(repo)]
1767 1767 if not cgversions:
1768 1768 raise ValueError(_('no common changegroup version'))
1769 1769 version = max(cgversions)
1770 1770 outgoing = _computeoutgoing(repo, heads, common)
1771 1771 if outgoing.missing:
1772 1772 cgstream = changegroup.makestream(repo, outgoing, version, source,
1773 1773 bundlecaps=bundlecaps)
1774 1774
1775 1775 if cgstream:
1776 1776 part = bundler.newpart('changegroup', data=cgstream)
1777 1777 if cgversions:
1778 1778 part.addparam('version', version)
1779 1779 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1780 1780 mandatory=False)
1781 1781 if 'treemanifest' in repo.requirements:
1782 1782 part.addparam('treemanifest', '1')
1783 1783
1784 1784 @getbundle2partsgenerator('bookmarks')
1785 1785 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1786 1786 b2caps=None, **kwargs):
1787 1787 """add a bookmark part to the requested bundle"""
1788 1788 if not kwargs.get('bookmarks', False):
1789 1789 return
1790 1790 if 'bookmarks' not in b2caps:
1791 1791 raise ValueError(_('no common bookmarks exchange method'))
1792 1792 books = bookmod.listbinbookmarks(repo)
1793 1793 data = bookmod.binaryencode(books)
1794 1794 if data:
1795 1795 bundler.newpart('bookmarks', data=data)
1796 1796
1797 1797 @getbundle2partsgenerator('listkeys')
1798 1798 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1799 1799 b2caps=None, **kwargs):
1800 1800 """add parts containing listkeys namespaces to the requested bundle"""
1801 1801 listkeys = kwargs.get('listkeys', ())
1802 1802 for namespace in listkeys:
1803 1803 part = bundler.newpart('listkeys')
1804 1804 part.addparam('namespace', namespace)
1805 1805 keys = repo.listkeys(namespace).items()
1806 1806 part.data = pushkey.encodekeys(keys)
1807 1807
1808 1808 @getbundle2partsgenerator('obsmarkers')
1809 1809 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1810 1810 b2caps=None, heads=None, **kwargs):
1811 1811 """add an obsolescence markers part to the requested bundle"""
1812 1812 if kwargs.get('obsmarkers', False):
1813 1813 if heads is None:
1814 1814 heads = repo.heads()
1815 1815 subset = [c.node() for c in repo.set('::%ln', heads)]
1816 1816 markers = repo.obsstore.relevantmarkers(subset)
1817 1817 markers = sorted(markers)
1818 1818 bundle2.buildobsmarkerspart(bundler, markers)
1819 1819
1820 1820 @getbundle2partsgenerator('phases')
1821 1821 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1822 1822 b2caps=None, heads=None, **kwargs):
1823 1823 """add phase heads part to the requested bundle"""
1824 1824 if kwargs.get('phases', False):
1825 1825         if 'heads' not in b2caps.get('phases'):
1826 1826 raise ValueError(_('no common phases exchange method'))
1827 1827 if heads is None:
1828 1828 heads = repo.heads()
1829 1829
1830 1830 headsbyphase = collections.defaultdict(set)
1831 1831 if repo.publishing():
1832 1832 headsbyphase[phases.public] = heads
1833 1833 else:
1834 1834 # find the appropriate heads to move
1835 1835
1836 1836 phase = repo._phasecache.phase
1837 1837 node = repo.changelog.node
1838 1838 rev = repo.changelog.rev
1839 1839 for h in heads:
1840 1840 headsbyphase[phase(repo, rev(h))].add(h)
1841 1841 seenphases = list(headsbyphase.keys())
1842 1842
1843 1843             # (We do not handle anything but public and draft phases for now.)
1844 1844 if seenphases:
1845 1845 assert max(seenphases) <= phases.draft
1846 1846
1847 1847 # if client is pulling non-public changesets, we need to find
1848 1848 # intermediate public heads.
1849 1849 draftheads = headsbyphase.get(phases.draft, set())
1850 1850 if draftheads:
1851 1851 publicheads = headsbyphase.get(phases.public, set())
1852 1852
1853 1853 revset = 'heads(only(%ln, %ln) and public())'
1854 1854 extraheads = repo.revs(revset, draftheads, publicheads)
1855 1855 for r in extraheads:
1856 1856 headsbyphase[phases.public].add(node(r))
1857 1857
1858 1858 # transform data in a format used by the encoding function
1859 1859 phasemapping = []
1860 1860 for phase in phases.allphases:
1861 1861 phasemapping.append(sorted(headsbyphase[phase]))
1862 1862
1863 1863 # generate the actual part
1864 1864 phasedata = phases.binaryencode(phasemapping)
1865 1865 bundler.newpart('phase-heads', data=phasedata)
1866 1866
1867 1867 @getbundle2partsgenerator('hgtagsfnodes')
1868 1868 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1869 1869 b2caps=None, heads=None, common=None,
1870 1870 **kwargs):
1871 1871 """Transfer the .hgtags filenodes mapping.
1872 1872
1873 1873 Only values for heads in this bundle will be transferred.
1874 1874
1875 1875     The part data consists of pairs of 20-byte changeset nodes and raw
1876 1876     .hgtags filenode values.
1877 1877 """
1878 1878 # Don't send unless:
1879 1879     # - changesets are being exchanged,
1880 1880 # - the client supports it.
1881 1881 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1882 1882 return
1883 1883
1884 1884 outgoing = _computeoutgoing(repo, heads, common)
1885 1885 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1886 1886
1887 1887 def check_heads(repo, their_heads, context):
1888 1888 """check if the heads of a repo have been modified
1889 1889
1890 1890 Used by peer for unbundling.
1891 1891 """
1892 1892 heads = repo.heads()
1893 1893 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1894 1894 if not (their_heads == ['force'] or their_heads == heads or
1895 1895 their_heads == ['hashed', heads_hash]):
1896 1896 # someone else committed/pushed/unbundled while we
1897 1897 # were transferring data
1898 1898 raise error.PushRaced('repository changed while %s - '
1899 1899 'please try again' % context)
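# For reference, a hedged sketch of how a caller computes the hashed form
# accepted above ('remoteheads' is a hypothetical list of 20-byte head
# nodes observed before pushing):
#
#   expected = ['hashed',
#               hashlib.sha1(''.join(sorted(remoteheads))).digest()]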
1900 1900
1901 1901 def unbundle(repo, cg, heads, source, url):
1902 1902 """Apply a bundle to a repo.
1903 1903
1904 1904     This function makes sure the repo is locked during the application and has
1905 1905     a mechanism to check that no push race occurred between the creation of the
1906 1906     bundle and its application.
1907 1907 
1908 1908     If the push was raced, a PushRaced exception is raised."""
1909 1909 r = 0
1910 1910 # need a transaction when processing a bundle2 stream
1911 1911 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1912 1912 lockandtr = [None, None, None]
1913 1913 recordout = None
1914 1914 # quick fix for output mismatch with bundle2 in 3.4
1915 1915 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
1916 1916 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1917 1917 captureoutput = True
1918 1918 try:
1919 1919 # note: outside bundle1, 'heads' is expected to be empty and this
1920 1920         # 'check_heads' call will be a no-op
1921 1921 check_heads(repo, heads, 'uploading changes')
1922 1922 # push can proceed
1923 1923 if not isinstance(cg, bundle2.unbundle20):
1924 1924 # legacy case: bundle1 (changegroup 01)
1925 1925 txnname = "\n".join([source, util.hidepassword(url)])
1926 1926 with repo.lock(), repo.transaction(txnname) as tr:
1927 1927 op = bundle2.applybundle(repo, cg, tr, source, url)
1928 1928 r = bundle2.combinechangegroupresults(op)
1929 1929 else:
1930 1930 r = None
1931 1931 try:
1932 1932 def gettransaction():
1933 1933 if not lockandtr[2]:
1934 1934 lockandtr[0] = repo.wlock()
1935 1935 lockandtr[1] = repo.lock()
1936 1936 lockandtr[2] = repo.transaction(source)
1937 1937 lockandtr[2].hookargs['source'] = source
1938 1938 lockandtr[2].hookargs['url'] = url
1939 1939 lockandtr[2].hookargs['bundle2'] = '1'
1940 1940 return lockandtr[2]
1941 1941
1942 1942 # Do greedy locking by default until we're satisfied with lazy
1943 1943 # locking.
1944 1944 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1945 1945 gettransaction()
1946 1946
1947 1947 op = bundle2.bundleoperation(repo, gettransaction,
1948 1948 captureoutput=captureoutput)
1949 1949 try:
1950 1950 op = bundle2.processbundle(repo, cg, op=op)
1951 1951 finally:
1952 1952 r = op.reply
1953 1953 if captureoutput and r is not None:
1954 1954 repo.ui.pushbuffer(error=True, subproc=True)
1955 1955 def recordout(output):
1956 1956 r.newpart('output', data=output, mandatory=False)
1957 1957 if lockandtr[2] is not None:
1958 1958 lockandtr[2].close()
1959 1959 except BaseException as exc:
1960 1960 exc.duringunbundle2 = True
1961 1961 if captureoutput and r is not None:
1962 1962 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1963 1963 def recordout(output):
1964 1964 part = bundle2.bundlepart('output', data=output,
1965 1965 mandatory=False)
1966 1966 parts.append(part)
1967 1967 raise
1968 1968 finally:
1969 1969 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1970 1970 if recordout is not None:
1971 1971 recordout(repo.ui.popbuffer())
1972 1972 return r
1973 1973
1974 1974 def _maybeapplyclonebundle(pullop):
1975 1975 """Apply a clone bundle from a remote, if possible."""
1976 1976
1977 1977 repo = pullop.repo
1978 1978 remote = pullop.remote
1979 1979
1980 1980 if not repo.ui.configbool('ui', 'clonebundles'):
1981 1981 return
1982 1982
1983 1983 # Only run if local repo is empty.
1984 1984 if len(repo):
1985 1985 return
1986 1986
1987 1987 if pullop.heads:
1988 1988 return
1989 1989
1990 1990 if not remote.capable('clonebundles'):
1991 1991 return
1992 1992
1993 1993 res = remote._call('clonebundles')
1994 1994
1995 1995 # If we call the wire protocol command, that's good enough to record the
1996 1996 # attempt.
1997 1997 pullop.clonebundleattempted = True
1998 1998
1999 1999 entries = parseclonebundlesmanifest(repo, res)
2000 2000 if not entries:
2001 2001 repo.ui.note(_('no clone bundles available on remote; '
2002 2002 'falling back to regular clone\n'))
2003 2003 return
2004 2004
2005 2005 entries = filterclonebundleentries(
2006 2006 repo, entries, streamclonerequested=pullop.streamclonerequested)
2007 2007
2008 2008 if not entries:
2009 2009 # There is a thundering herd concern here. However, if a server
2010 2010 # operator doesn't advertise bundles appropriate for its clients,
2011 2011 # they deserve what's coming. Furthermore, from a client's
2012 2012 # perspective, no automatic fallback would mean not being able to
2013 2013 # clone!
2014 2014 repo.ui.warn(_('no compatible clone bundles available on server; '
2015 2015 'falling back to regular clone\n'))
2016 2016 repo.ui.warn(_('(you may want to report this to the server '
2017 2017 'operator)\n'))
2018 2018 return
2019 2019
2020 2020 entries = sortclonebundleentries(repo.ui, entries)
2021 2021
2022 2022 url = entries[0]['URL']
2023 2023 repo.ui.status(_('applying clone bundle from %s\n') % url)
2024 2024 if trypullbundlefromurl(repo.ui, repo, url):
2025 2025 repo.ui.status(_('finished applying clone bundle\n'))
2026 2026 # Bundle failed.
2027 2027 #
2028 2028 # We abort by default to avoid the thundering herd of
2029 2029 # clients flooding a server that was expecting expensive
2030 2030 # clone load to be offloaded.
2031 2031 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2032 2032 repo.ui.warn(_('falling back to normal clone\n'))
2033 2033 else:
2034 2034 raise error.Abort(_('error applying bundle'),
2035 2035 hint=_('if this error persists, consider contacting '
2036 2036 'the server operator or disable clone '
2037 2037 'bundles via '
2038 2038 '"--config ui.clonebundles=false"'))
2039 2039
2040 2040 def parseclonebundlesmanifest(repo, s):
2041 2041 """Parses the raw text of a clone bundles manifest.
2042 2042
2043 2043     Returns a list of dicts. The dicts have a ``URL`` key corresponding
2044 2044     to the URL; all other keys are attributes for the entry.
2045 2045 """
2046 2046 m = []
2047 2047 for line in s.splitlines():
2048 2048 fields = line.split()
2049 2049 if not fields:
2050 2050 continue
2051 2051 attrs = {'URL': fields[0]}
2052 2052 for rawattr in fields[1:]:
2053 2053 key, value = rawattr.split('=', 1)
2054 2054 key = urlreq.unquote(key)
2055 2055 value = urlreq.unquote(value)
2056 2056 attrs[key] = value
2057 2057
2058 2058 # Parse BUNDLESPEC into components. This makes client-side
2059 2059 # preferences easier to specify since you can prefer a single
2060 2060 # component of the BUNDLESPEC.
2061 2061 if key == 'BUNDLESPEC':
2062 2062 try:
2063 2063 comp, version, params = parsebundlespec(repo, value,
2064 2064 externalnames=True)
2065 2065 attrs['COMPRESSION'] = comp
2066 2066 attrs['VERSION'] = version
2067 2067 except error.InvalidBundleSpecification:
2068 2068 pass
2069 2069 except error.UnsupportedBundleSpecification:
2070 2070 pass
2071 2071
2072 2072 m.append(attrs)
2073 2073
2074 2074 return m
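# Illustrative example (hypothetical URLs): a manifest such as
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2
#   https://example.com/sni.hg BUNDLESPEC=bzip2-v2 REQUIRESNI=true
#
# parses into one dict per line, e.g. {'URL': 'https://example.com/full.hg',
# 'BUNDLESPEC': 'gzip-v2', 'COMPRESSION': 'gzip', 'VERSION': 'v2'}.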
2075 2075
2076 2076 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2077 2077 """Remove incompatible clone bundle manifest entries.
2078 2078
2079 2079 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2080 2080 and returns a new list consisting of only the entries that this client
2081 2081 should be able to apply.
2082 2082
2083 2083 There is no guarantee we'll be able to apply all returned entries because
2084 2084 the metadata we use to filter on may be missing or wrong.
2085 2085 """
2086 2086 newentries = []
2087 2087 for entry in entries:
2088 2088 spec = entry.get('BUNDLESPEC')
2089 2089 if spec:
2090 2090 try:
2091 2091 comp, version, params = parsebundlespec(repo, spec, strict=True)
2092 2092
2093 2093 # If a stream clone was requested, filter out non-streamclone
2094 2094 # entries.
2095 2095 if streamclonerequested and (comp != 'UN' or version != 's1'):
2096 2096 repo.ui.debug('filtering %s because not a stream clone\n' %
2097 2097 entry['URL'])
2098 2098 continue
2099 2099
2100 2100 except error.InvalidBundleSpecification as e:
2101 2101 repo.ui.debug(str(e) + '\n')
2102 2102 continue
2103 2103 except error.UnsupportedBundleSpecification as e:
2104 2104 repo.ui.debug('filtering %s because unsupported bundle '
2105 2105 'spec: %s\n' % (entry['URL'], str(e)))
2106 2106 continue
2107 2107 # If we don't have a spec and requested a stream clone, we don't know
2108 2108         # what the entry is, so don't attempt to apply it.
2109 2109 elif streamclonerequested:
2110 2110 repo.ui.debug('filtering %s because cannot determine if a stream '
2111 2111 'clone bundle\n' % entry['URL'])
2112 2112 continue
2113 2113
2114 2114 if 'REQUIRESNI' in entry and not sslutil.hassni:
2115 2115 repo.ui.debug('filtering %s because SNI not supported\n' %
2116 2116 entry['URL'])
2117 2117 continue
2118 2118
2119 2119 newentries.append(entry)
2120 2120
2121 2121 return newentries
2122 2122
2123 2123 class clonebundleentry(object):
2124 2124 """Represents an item in a clone bundles manifest.
2125 2125
2126 2126 This rich class is needed to support sorting since sorted() in Python 3
2127 2127 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2128 2128 won't work.
2129 2129 """
2130 2130
2131 2131 def __init__(self, value, prefers):
2132 2132 self.value = value
2133 2133 self.prefers = prefers
2134 2134
2135 2135 def _cmp(self, other):
2136 2136 for prefkey, prefvalue in self.prefers:
2137 2137 avalue = self.value.get(prefkey)
2138 2138 bvalue = other.value.get(prefkey)
2139 2139
2140 2140 # Special case for b missing attribute and a matches exactly.
2141 2141 if avalue is not None and bvalue is None and avalue == prefvalue:
2142 2142 return -1
2143 2143
2144 2144 # Special case for a missing attribute and b matches exactly.
2145 2145 if bvalue is not None and avalue is None and bvalue == prefvalue:
2146 2146 return 1
2147 2147
2148 2148 # We can't compare unless attribute present on both.
2149 2149 if avalue is None or bvalue is None:
2150 2150 continue
2151 2151
2152 2152 # Same values should fall back to next attribute.
2153 2153 if avalue == bvalue:
2154 2154 continue
2155 2155
2156 2156 # Exact matches come first.
2157 2157 if avalue == prefvalue:
2158 2158 return -1
2159 2159 if bvalue == prefvalue:
2160 2160 return 1
2161 2161
2162 2162 # Fall back to next attribute.
2163 2163 continue
2164 2164
2165 2165 # If we got here we couldn't sort by attributes and prefers. Fall
2166 2166 # back to index order.
2167 2167 return 0
2168 2168
2169 2169 def __lt__(self, other):
2170 2170 return self._cmp(other) < 0
2171 2171
2172 2172 def __gt__(self, other):
2173 2173 return self._cmp(other) > 0
2174 2174
2175 2175 def __eq__(self, other):
2176 2176 return self._cmp(other) == 0
2177 2177
2178 2178 def __le__(self, other):
2179 2179 return self._cmp(other) <= 0
2180 2180
2181 2181 def __ge__(self, other):
2182 2182 return self._cmp(other) >= 0
2183 2183
2184 2184 def __ne__(self, other):
2185 2185 return self._cmp(other) != 0
2186 2186
2187 2187 def sortclonebundleentries(ui, entries):
2188 2188 prefers = ui.configlist('ui', 'clonebundleprefers')
2189 2189 if not prefers:
2190 2190 return list(entries)
2191 2191
2192 2192 prefers = [p.split('=', 1) for p in prefers]
2193 2193
2194 2194 items = sorted(clonebundleentry(v, prefers) for v in entries)
2195 2195 return [i.value for i in items]
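# Illustrative example: with a configuration like
#
#   [ui]
#   clonebundleprefers = COMPRESSION=gzip VERSION=v2
#
# entries whose COMPRESSION is 'gzip' sort first, ties are broken by
# VERSION=v2, and anything still tied keeps its manifest order.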
2196 2196
2197 2197 def trypullbundlefromurl(ui, repo, url):
2198 2198 """Attempt to apply a bundle from a URL."""
2199 2199 with repo.lock(), repo.transaction('bundleurl') as tr:
2200 2200 try:
2201 2201 fh = urlmod.open(ui, url)
2202 2202 cg = readbundle(ui, fh, 'stream')
2203 2203
2204 2204 if isinstance(cg, streamclone.streamcloneapplier):
2205 2205 cg.apply(repo)
2206 2206 else:
2207 2207 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2208 2208 return True
2209 2209 except urlerr.httperror as e:
2210 2210 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2211 2211 except urlerr.urlerror as e:
2212 2212 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2213 2213
2214 2214 return False
@@ -1,1107 +1,1107
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18
19 19 from . import (
20 20 bookmarks,
21 21 bundlerepo,
22 22 cmdutil,
23 23 destutil,
24 24 discovery,
25 25 error,
26 26 exchange,
27 27 extensions,
28 28 httppeer,
29 29 localrepo,
30 30 lock,
31 logexchange,
31 32 merge as mergemod,
32 33 node,
33 34 phases,
34 remotenames,
35 35 repoview,
36 36 scmutil,
37 37 sshpeer,
38 38 statichttprepo,
39 39 ui as uimod,
40 40 unionrepo,
41 41 url,
42 42 util,
43 43 verify as verifymod,
44 44 vfs as vfsmod,
45 45 )
46 46
47 47 release = lock.release
48 48
49 49 # shared features
50 50 sharedbookmarks = 'bookmarks'
51 51
52 52 def _local(path):
53 53 path = util.expandpath(util.urllocalpath(path))
54 54 return (os.path.isfile(path) and bundlerepo or localrepo)
55 55
56 56 def addbranchrevs(lrepo, other, branches, revs):
57 57 peer = other.peer() # a courtesy to callers using a localrepo for other
58 58 hashbranch, branches = branches
59 59 if not hashbranch and not branches:
60 60 x = revs or None
61 61 if util.safehasattr(revs, 'first'):
62 62 y = revs.first()
63 63 elif revs:
64 64 y = revs[0]
65 65 else:
66 66 y = None
67 67 return x, y
68 68 if revs:
69 69 revs = list(revs)
70 70 else:
71 71 revs = []
72 72
73 73 if not peer.capable('branchmap'):
74 74 if branches:
75 75 raise error.Abort(_("remote branch lookup not supported"))
76 76 revs.append(hashbranch)
77 77 return revs, revs[0]
78 78 branchmap = peer.branchmap()
79 79
80 80 def primary(branch):
81 81 if branch == '.':
82 82 if not lrepo:
83 83 raise error.Abort(_("dirstate branch not accessible"))
84 84 branch = lrepo.dirstate.branch()
85 85 if branch in branchmap:
86 86 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
87 87 return True
88 88 else:
89 89 return False
90 90
91 91 for branch in branches:
92 92 if not primary(branch):
93 93 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
94 94 if hashbranch:
95 95 if not primary(hashbranch):
96 96 revs.append(hashbranch)
97 97 return revs, revs[0]
98 98
99 99 def parseurl(path, branches=None):
100 100 '''parse url#branch, returning (url, (branch, branches))'''
101 101
102 102 u = util.url(path)
103 103 branch = None
104 104 if u.fragment:
105 105 branch = u.fragment
106 106 u.fragment = None
107 107 return bytes(u), (branch, branches or [])
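# For instance (a sketch): parseurl(b'https://example.org/repo#stable')
# would return ('https://example.org/repo', ('stable', [])), while a URL
# without a fragment yields a branch of None.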
108 108
109 109 schemes = {
110 110 'bundle': bundlerepo,
111 111 'union': unionrepo,
112 112 'file': _local,
113 113 'http': httppeer,
114 114 'https': httppeer,
115 115 'ssh': sshpeer,
116 116 'static-http': statichttprepo,
117 117 }
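# A hedged sketch of how third-party code could teach Mercurial a new URL
# scheme by extending this table (the scheme name and module are
# hypothetical; the module must provide instance(), as _peerlookup below
# expects):
#
#   from mercurial import hg
#   hg.schemes['example'] = examplemodule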
118 118
119 119 def _peerlookup(path):
120 120 u = util.url(path)
121 121 scheme = u.scheme or 'file'
122 122 thing = schemes.get(scheme) or schemes['file']
123 123 try:
124 124 return thing(path)
125 125 except TypeError:
126 126 # we can't test callable(thing) because 'thing' can be an unloaded
127 127 # module that implements __call__
128 128 if not util.safehasattr(thing, 'instance'):
129 129 raise
130 130 return thing
131 131
132 132 def islocal(repo):
133 133 '''return true if repo (or path pointing to repo) is local'''
134 134 if isinstance(repo, bytes):
135 135 try:
136 136 return _peerlookup(repo).islocal(repo)
137 137 except AttributeError:
138 138 return False
139 139 return repo.local()
140 140
141 141 def openpath(ui, path):
142 142 '''open path with open if local, url.open if remote'''
143 143 pathurl = util.url(path, parsequery=False, parsefragment=False)
144 144 if pathurl.islocal():
145 145 return util.posixfile(pathurl.localpath(), 'rb')
146 146 else:
147 147 return url.open(ui, path)
148 148
149 149 # a list of (ui, repo) functions called for wire peer initialization
150 150 wirepeersetupfuncs = []
151 151
152 152 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
153 153 """return a repository object for the specified path"""
154 154 obj = _peerlookup(path).instance(ui, path, create)
155 155 ui = getattr(obj, "ui", ui)
156 156 for f in presetupfuncs or []:
157 157 f(ui, obj)
158 158 for name, module in extensions.extensions(ui):
159 159 hook = getattr(module, 'reposetup', None)
160 160 if hook:
161 161 hook(ui, obj)
162 162 if not obj.local():
163 163 for f in wirepeersetupfuncs:
164 164 f(ui, obj)
165 165 return obj
166 166
167 167 def repository(ui, path='', create=False, presetupfuncs=None):
168 168 """return a repository object for the specified path"""
169 169 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
170 170 repo = peer.local()
171 171 if not repo:
172 172 raise error.Abort(_("repository '%s' is not local") %
173 173 (path or peer.url()))
174 174 return repo.filtered('visible')
175 175
176 176 def peer(uiorrepo, opts, path, create=False):
177 177 '''return a repository peer for the specified path'''
178 178 rui = remoteui(uiorrepo, opts)
179 179 return _peerorrepo(rui, path, create).peer()
180 180
181 181 def defaultdest(source):
182 182 '''return default destination of clone if none is given
183 183
184 184 >>> defaultdest(b'foo')
185 185 'foo'
186 186 >>> defaultdest(b'/foo/bar')
187 187 'bar'
188 188 >>> defaultdest(b'/')
189 189 ''
190 190 >>> defaultdest(b'')
191 191 ''
192 192 >>> defaultdest(b'http://example.org/')
193 193 ''
194 194 >>> defaultdest(b'http://example.org/foo/')
195 195 'foo'
196 196 '''
197 197 path = util.url(source).path
198 198 if not path:
199 199 return ''
200 200 return os.path.basename(os.path.normpath(path))
201 201
202 202 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
203 203 relative=False):
204 204 '''create a shared repository'''
205 205
206 206 if not islocal(source):
207 207 raise error.Abort(_('can only share local repositories'))
208 208
209 209 if not dest:
210 210 dest = defaultdest(source)
211 211 else:
212 212 dest = ui.expandpath(dest)
213 213
214 214 if isinstance(source, str):
215 215 origsource = ui.expandpath(source)
216 216 source, branches = parseurl(origsource)
217 217 srcrepo = repository(ui, source)
218 218 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
219 219 else:
220 220 srcrepo = source.local()
221 221 origsource = source = srcrepo.url()
222 222 checkout = None
223 223
224 224 sharedpath = srcrepo.sharedpath # if our source is already sharing
225 225
226 226 destwvfs = vfsmod.vfs(dest, realpath=True)
227 227 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
228 228
229 229 if destvfs.lexists():
230 230 raise error.Abort(_('destination already exists'))
231 231
232 232 if not destwvfs.isdir():
233 233 destwvfs.mkdir()
234 234 destvfs.makedir()
235 235
236 236 requirements = ''
237 237 try:
238 238 requirements = srcrepo.vfs.read('requires')
239 239 except IOError as inst:
240 240 if inst.errno != errno.ENOENT:
241 241 raise
242 242
243 243 if relative:
244 244 try:
245 245 sharedpath = os.path.relpath(sharedpath, destvfs.base)
246 246 requirements += 'relshared\n'
247 247 except (IOError, ValueError) as e:
248 248 # ValueError is raised on Windows if the drive letters differ on
249 249 # each path
250 250 raise error.Abort(_('cannot calculate relative path'),
251 251 hint=str(e))
252 252 else:
253 253 requirements += 'shared\n'
254 254
255 255 destvfs.write('requires', requirements)
256 256 destvfs.write('sharedpath', sharedpath)
257 257
258 258 r = repository(ui, destwvfs.base)
259 259 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
260 260 _postshareupdate(r, update, checkout=checkout)
261 261 return r
262 262
263 263 def unshare(ui, repo):
264 264 """convert a shared repository to a normal one
265 265
266 266 Copy the store data to the repo and remove the sharedpath data.
267 267 """
268 268
269 269 destlock = lock = None
270 270 lock = repo.lock()
271 271 try:
272 272 # we use locks here because if we race with commit, we
273 273 # can end up with extra data in the cloned revlogs that's
274 274 # not pointed to by changesets, thus causing verify to
275 275 # fail
276 276
277 277 destlock = copystore(ui, repo, repo.path)
278 278
279 279 sharefile = repo.vfs.join('sharedpath')
280 280 util.rename(sharefile, sharefile + '.old')
281 281
282 282 repo.requirements.discard('shared')
283 283 repo.requirements.discard('relshared')
284 284 repo._writerequirements()
285 285 finally:
286 286 destlock and destlock.release()
287 287 lock and lock.release()
288 288
289 289 # update store, spath, svfs and sjoin of repo
290 290 repo.unfiltered().__init__(repo.baseui, repo.root)
291 291
292 292 # TODO: figure out how to access subrepos that exist, but were previously
293 293 # removed from .hgsub
294 294 c = repo['.']
295 295 subs = c.substate
296 296 for s in sorted(subs):
297 297 c.sub(s).unshare()
298 298
299 299 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
300 300 """Called after a new shared repo is created.
301 301
302 302 The new repo only has a requirements file and pointer to the source.
303 303 This function configures additional shared data.
304 304
305 305 Extensions can wrap this function and write additional entries to
306 306 destrepo/.hg/shared to indicate additional pieces of data to be shared.
307 307 """
308 308 default = defaultpath or sourcerepo.ui.config('paths', 'default')
309 309 if default:
310 310 fp = destrepo.vfs("hgrc", "w", text=True)
311 311 fp.write("[paths]\n")
312 312 fp.write("default = %s\n" % default)
313 313 fp.close()
314 314
315 315 with destrepo.wlock():
316 316 if bookmarks:
317 317 fp = destrepo.vfs('shared', 'w')
318 318 fp.write(sharedbookmarks + '\n')
319 319 fp.close()
320 320
321 321 def _postshareupdate(repo, update, checkout=None):
322 322 """Maybe perform a working directory update after a shared repo is created.
323 323
324 324 ``update`` can be a boolean or a revision to update to.
325 325 """
326 326 if not update:
327 327 return
328 328
329 329 repo.ui.status(_("updating working directory\n"))
330 330 if update is not True:
331 331 checkout = update
332 332 for test in (checkout, 'default', 'tip'):
333 333 if test is None:
334 334 continue
335 335 try:
336 336 uprev = repo.lookup(test)
337 337 break
338 338 except error.RepoLookupError:
339 339 continue
340 340 _update(repo, uprev)
341 341
342 342 def copystore(ui, srcrepo, destpath):
343 343     '''copy files from the store of srcrepo into destpath
344 344
345 345 returns destlock
346 346 '''
347 347 destlock = None
348 348 try:
349 349 hardlink = None
350 350 num = 0
351 351 closetopic = [None]
352 352 def prog(topic, pos):
353 353 if pos is None:
354 354 closetopic[0] = topic
355 355 else:
356 356 ui.progress(topic, pos + num)
357 357 srcpublishing = srcrepo.publishing()
358 358 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
359 359 dstvfs = vfsmod.vfs(destpath)
360 360 for f in srcrepo.store.copylist():
361 361 if srcpublishing and f.endswith('phaseroots'):
362 362 continue
363 363 dstbase = os.path.dirname(f)
364 364 if dstbase and not dstvfs.exists(dstbase):
365 365 dstvfs.mkdir(dstbase)
366 366 if srcvfs.exists(f):
367 367 if f.endswith('data'):
368 368 # 'dstbase' may be empty (e.g. revlog format 0)
369 369 lockfile = os.path.join(dstbase, "lock")
370 370 # lock to avoid premature writing to the target
371 371 destlock = lock.lock(dstvfs, lockfile)
372 372 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
373 373 hardlink, progress=prog)
374 374 num += n
375 375 if hardlink:
376 376 ui.debug("linked %d files\n" % num)
377 377 if closetopic[0]:
378 378 ui.progress(closetopic[0], None)
379 379 else:
380 380 ui.debug("copied %d files\n" % num)
381 381 if closetopic[0]:
382 382 ui.progress(closetopic[0], None)
383 383 return destlock
384 384 except: # re-raises
385 385 release(destlock)
386 386 raise
387 387
388 388 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
389 389 rev=None, update=True, stream=False):
390 390 """Perform a clone using a shared repo.
391 391
392 392 The store for the repository will be located at <sharepath>/.hg. The
393 393 specified revisions will be cloned or pulled from "source". A shared repo
394 394 will be created at "dest" and a working copy will be created if "update" is
395 395 True.
396 396 """
397 397 revs = None
398 398 if rev:
399 399 if not srcpeer.capable('lookup'):
400 400 raise error.Abort(_("src repository does not support "
401 401 "revision lookup and so doesn't "
402 402 "support clone by revision"))
403 403 revs = [srcpeer.lookup(r) for r in rev]
404 404
405 405     # Obtain a lock before checking for or cloning the pooled repo; otherwise
406 406 # 2 clients may race creating or populating it.
407 407 pooldir = os.path.dirname(sharepath)
408 408 # lock class requires the directory to exist.
409 409 try:
410 410 util.makedir(pooldir, False)
411 411 except OSError as e:
412 412 if e.errno != errno.EEXIST:
413 413 raise
414 414
415 415 poolvfs = vfsmod.vfs(pooldir)
416 416 basename = os.path.basename(sharepath)
417 417
418 418 with lock.lock(poolvfs, '%s.lock' % basename):
419 419 if os.path.exists(sharepath):
420 420 ui.status(_('(sharing from existing pooled repository %s)\n') %
421 421 basename)
422 422 else:
423 423 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
424 424 # Always use pull mode because hardlinks in share mode don't work
425 425 # well. Never update because working copies aren't necessary in
426 426 # share mode.
427 427 clone(ui, peeropts, source, dest=sharepath, pull=True,
428 428 rev=rev, update=False, stream=stream)
429 429
430 430 # Resolve the value to put in [paths] section for the source.
431 431 if islocal(source):
432 432 defaultpath = os.path.abspath(util.urllocalpath(source))
433 433 else:
434 434 defaultpath = source
435 435
436 436 sharerepo = repository(ui, path=sharepath)
437 437 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
438 438 defaultpath=defaultpath)
439 439
440 440 # We need to perform a pull against the dest repo to fetch bookmarks
441 441     # and other non-store data that isn't shared by default. In the case of a
442 442     # non-existing shared repo, this means we pull from the remote twice. This
443 443 # is a bit weird. But at the time it was implemented, there wasn't an easy
444 444 # way to pull just non-changegroup data.
445 445 destrepo = repository(ui, path=dest)
446 446 exchange.pull(destrepo, srcpeer, heads=revs)
447 447
448 448 _postshareupdate(destrepo, update)
449 449
450 450 return srcpeer, peer(ui, peeropts, dest)
451 451
452 452 # Recomputing branch cache might be slow on big repos,
453 453 # so just copy it
454 454 def _copycache(srcrepo, dstcachedir, fname):
455 455     """copy a cache from srcrepo to dstcachedir (if it exists)"""
456 456 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
457 457 dstbranchcache = os.path.join(dstcachedir, fname)
458 458 if os.path.exists(srcbranchcache):
459 459 if not os.path.exists(dstcachedir):
460 460 os.mkdir(dstcachedir)
461 461 util.copyfile(srcbranchcache, dstbranchcache)
462 462
463 463 def _cachetocopy(srcrepo):
464 464     """return the list of cache files valuable to copy during a clone"""
465 465 # In local clones we're copying all nodes, not just served
466 466 # ones. Therefore copy all branch caches over.
467 467 cachefiles = ['branch2']
468 468 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
469 469 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
470 470 cachefiles += ['tags2']
471 471 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
472 472 cachefiles += ['hgtagsfnodes1']
473 473 return cachefiles
474 474
475 475 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
476 476 update=True, stream=False, branch=None, shareopts=None):
477 477 """Make a copy of an existing repository.
478 478
479 479 Create a copy of an existing repository in a new directory. The
480 480 source and destination are URLs, as passed to the repository
481 481 function. Returns a pair of repository peers, the source and
482 482 newly created destination.
483 483
484 484 The location of the source is added to the new repository's
485 485 .hg/hgrc file, as the default to be used for future pulls and
486 486 pushes.
487 487
488 488 If an exception is raised, the partly cloned/updated destination
489 489 repository will be deleted.
490 490
491 491 Arguments:
492 492
493 493 source: repository object or URL
494 494
495 495 dest: URL of destination repository to create (defaults to base
496 496 name of source repository)
497 497
498 498 pull: always pull from source repository, even in local case or if the
499 499 server prefers streaming
500 500
501 501 stream: stream raw data uncompressed from repository (fast over
502 502 LAN, slow over WAN)
503 503
504 504 rev: revision to clone up to (implies pull=True)
505 505
506 506 update: update working directory after clone completes, if
507 507 destination is local repository (True means update to default rev,
508 508 anything else is treated as a revision)
509 509
510 510 branch: branches to clone
511 511
512 512 shareopts: dict of options to control auto sharing behavior. The "pool" key
513 513 activates auto sharing mode and defines the directory for stores. The
514 514 "mode" key determines how to construct the directory name of the shared
515 515 repository. "identity" means the name is derived from the node of the first
516 516 changeset in the repository. "remote" means the name is derived from the
517 517 remote's path/URL. Defaults to "identity."
518 518 """
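    # Illustrative only: a caller enabling pooled storage might pass
    # shareopts={'pool': '/srv/hgpool', 'mode': 'identity'} (hypothetical
    # path), which routes the clone through clonewithshare() below.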
519 519
520 520 if isinstance(source, bytes):
521 521 origsource = ui.expandpath(source)
522 522 source, branch = parseurl(origsource, branch)
523 523 srcpeer = peer(ui, peeropts, source)
524 524 else:
525 525 srcpeer = source.peer() # in case we were called with a localrepo
526 526 branch = (None, branch or [])
527 527 origsource = source = srcpeer.url()
528 528 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
529 529
530 530 if dest is None:
531 531 dest = defaultdest(source)
532 532 if dest:
533 533 ui.status(_("destination directory: %s\n") % dest)
534 534 else:
535 535 dest = ui.expandpath(dest)
536 536
537 537 dest = util.urllocalpath(dest)
538 538 source = util.urllocalpath(source)
539 539
540 540 if not dest:
541 541 raise error.Abort(_("empty destination path is not valid"))
542 542
543 543 destvfs = vfsmod.vfs(dest, expandpath=True)
544 544 if destvfs.lexists():
545 545 if not destvfs.isdir():
546 546 raise error.Abort(_("destination '%s' already exists") % dest)
547 547 elif destvfs.listdir():
548 548 raise error.Abort(_("destination '%s' is not empty") % dest)
549 549
550 550 shareopts = shareopts or {}
551 551 sharepool = shareopts.get('pool')
552 552 sharenamemode = shareopts.get('mode')
553 553 if sharepool and islocal(dest):
554 554 sharepath = None
555 555 if sharenamemode == 'identity':
556 556 # Resolve the name from the initial changeset in the remote
557 557 # repository. This returns nullid when the remote is empty. It
558 558 # raises RepoLookupError if revision 0 is filtered or otherwise
559 559 # not available. If we fail to resolve, sharing is not enabled.
560 560 try:
561 561 rootnode = srcpeer.lookup('0')
562 562 if rootnode != node.nullid:
563 563 sharepath = os.path.join(sharepool, node.hex(rootnode))
564 564 else:
565 565 ui.status(_('(not using pooled storage: '
566 566 'remote appears to be empty)\n'))
567 567 except error.RepoLookupError:
568 568 ui.status(_('(not using pooled storage: '
569 569 'unable to resolve identity of remote)\n'))
570 570 elif sharenamemode == 'remote':
571 571 sharepath = os.path.join(
572 572 sharepool, hashlib.sha1(source).hexdigest())
573 573 else:
574 574 raise error.Abort(_('unknown share naming mode: %s') %
575 575 sharenamemode)
576 576
577 577 if sharepath:
578 578 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
579 579 dest, pull=pull, rev=rev, update=update,
580 580 stream=stream)
581 581
582 582 srclock = destlock = cleandir = None
583 583 srcrepo = srcpeer.local()
584 584 try:
585 585 abspath = origsource
586 586 if islocal(origsource):
587 587 abspath = os.path.abspath(util.urllocalpath(origsource))
588 588
589 589 if islocal(dest):
590 590 cleandir = dest
591 591
592 592 copy = False
593 593 if (srcrepo and srcrepo.cancopy() and islocal(dest)
594 594 and not phases.hassecret(srcrepo)):
595 595 copy = not pull and not rev
596 596
597 597 if copy:
598 598 try:
599 599 # we use a lock here because if we race with commit, we
600 600 # can end up with extra data in the cloned revlogs that's
601 601 # not pointed to by changesets, thus causing verify to
602 602 # fail
603 603 srclock = srcrepo.lock(wait=False)
604 604 except error.LockError:
605 605 copy = False
606 606
607 607 if copy:
608 608 srcrepo.hook('preoutgoing', throw=True, source='clone')
609 609 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
610 610 if not os.path.exists(dest):
611 611 os.mkdir(dest)
612 612 else:
613 613 # only clean up directories we create ourselves
614 614 cleandir = hgdir
615 615 try:
616 616 destpath = hgdir
617 617 util.makedir(destpath, notindexed=True)
618 618 except OSError as inst:
619 619 if inst.errno == errno.EEXIST:
620 620 cleandir = None
621 621 raise error.Abort(_("destination '%s' already exists")
622 622 % dest)
623 623 raise
624 624
625 625 destlock = copystore(ui, srcrepo, destpath)
626 626 # copy bookmarks over
627 627 srcbookmarks = srcrepo.vfs.join('bookmarks')
628 628 dstbookmarks = os.path.join(destpath, 'bookmarks')
629 629 if os.path.exists(srcbookmarks):
630 630 util.copyfile(srcbookmarks, dstbookmarks)
631 631
632 632 dstcachedir = os.path.join(destpath, 'cache')
633 633 for cache in _cachetocopy(srcrepo):
634 634 _copycache(srcrepo, dstcachedir, cache)
635 635
636 636 # we need to re-init the repo after manually copying the data
637 637 # into it
638 638 destpeer = peer(srcrepo, peeropts, dest)
639 639 srcrepo.hook('outgoing', source='clone',
640 640 node=node.hex(node.nullid))
641 641 else:
642 642 try:
643 643 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
644 644 # only pass ui when no srcrepo
645 645 except OSError as inst:
646 646 if inst.errno == errno.EEXIST:
647 647 cleandir = None
648 648 raise error.Abort(_("destination '%s' already exists")
649 649 % dest)
650 650 raise
651 651
652 652 revs = None
653 653 if rev:
654 654 if not srcpeer.capable('lookup'):
655 655 raise error.Abort(_("src repository does not support "
656 656 "revision lookup and so doesn't "
657 657 "support clone by revision"))
658 658 revs = [srcpeer.lookup(r) for r in rev]
659 659 checkout = revs[0]
660 660 local = destpeer.local()
661 661 if local:
662 662 if not stream:
663 663 if pull:
664 664 stream = False
665 665 else:
666 666 stream = None
667 667 # internal config: ui.quietbookmarkmove
668 668 overrides = {('ui', 'quietbookmarkmove'): True}
669 669 with local.ui.configoverride(overrides, 'clone'):
670 670 exchange.pull(local, srcpeer, revs,
671 671 streamclonerequested=stream)
672 672 elif srcrepo:
673 673 exchange.push(srcrepo, destpeer, revs=revs,
674 674 bookmarks=srcrepo._bookmarks.keys())
675 675 else:
676 676 raise error.Abort(_("clone from remote to remote not supported")
677 677 )
678 678
679 679 cleandir = None
680 680
681 681 destrepo = destpeer.local()
682 682 if destrepo:
683 683 template = uimod.samplehgrcs['cloned']
684 684 fp = destrepo.vfs("hgrc", "wb")
685 685 u = util.url(abspath)
686 686 u.passwd = None
687 687 defaulturl = bytes(u)
688 688 fp.write(util.tonativeeol(template % defaulturl))
689 689 fp.close()
690 690
691 691 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
692 692
693 693 if ui.configbool('experimental', 'remotenames'):
694 remotenames.pullremotenames(destrepo, srcpeer)
694 logexchange.pullremotenames(destrepo, srcpeer)
695 695
696 696 if update:
697 697 if update is not True:
698 698 checkout = srcpeer.lookup(update)
699 699 uprev = None
700 700 status = None
701 701 if checkout is not None:
702 702 try:
703 703 uprev = destrepo.lookup(checkout)
704 704 except error.RepoLookupError:
705 705 if update is not True:
706 706 try:
707 707 uprev = destrepo.lookup(update)
708 708 except error.RepoLookupError:
709 709 pass
710 710 if uprev is None:
711 711 try:
712 712 uprev = destrepo._bookmarks['@']
713 713 update = '@'
714 714 bn = destrepo[uprev].branch()
715 715 if bn == 'default':
716 716 status = _("updating to bookmark @\n")
717 717 else:
718 718 status = (_("updating to bookmark @ on branch %s\n")
719 719 % bn)
720 720 except KeyError:
721 721 try:
722 722 uprev = destrepo.branchtip('default')
723 723 except error.RepoLookupError:
724 724 uprev = destrepo.lookup('tip')
725 725 if not status:
726 726 bn = destrepo[uprev].branch()
727 727 status = _("updating to branch %s\n") % bn
728 728 destrepo.ui.status(status)
729 729 _update(destrepo, uprev)
730 730 if update in destrepo._bookmarks:
731 731 bookmarks.activate(destrepo, update)
732 732 finally:
733 733 release(srclock, destlock)
734 734 if cleandir is not None:
735 735 shutil.rmtree(cleandir, True)
736 736 if srcpeer is not None:
737 737 srcpeer.close()
738 738 return srcpeer, destpeer
739 739
740 740 def _showstats(repo, stats, quietempty=False):
741 741 if quietempty and not any(stats):
742 742 return
743 743 repo.ui.status(_("%d files updated, %d files merged, "
744 744 "%d files removed, %d files unresolved\n") % stats)
745 745
746 746 def updaterepo(repo, node, overwrite, updatecheck=None):
747 747 """Update the working directory to node.
748 748
749 749 When overwrite is set, changes are clobbered; otherwise they are merged
750 750
751 751 returns stats (see pydoc mercurial.merge.applyupdates)"""
752 752 return mergemod.update(repo, node, False, overwrite,
753 753 labels=['working copy', 'destination'],
754 754 updatecheck=updatecheck)
755 755
756 756 def update(repo, node, quietempty=False, updatecheck=None):
757 757 """update the working directory to node"""
758 758 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
759 759 _showstats(repo, stats, quietempty)
760 760 if stats[3]:
761 761 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
762 762 return stats[3] > 0
763 763
764 764 # naming conflict in clone()
765 765 _update = update
766 766
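The return values above rely on a tuple convention from mergemod.update: stats is (updated, merged, removed, unresolved), and only the fourth count signals conflicts. A minimal sketch with a hypothetical stats value (not taken from a real run):

# Hypothetical stats tuple: (updated, merged, removed, unresolved).
stats = (3, 1, 0, 2)
print("%d files updated, %d files merged, "
      "%d files removed, %d files unresolved" % stats)
# update()/clean()/merge() all answer "were there conflicts?":
conflicts = stats[3] > 0
assert conflicts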
767 767 def clean(repo, node, show_stats=True, quietempty=False):
768 768 """forcibly switch the working directory to node, clobbering changes"""
769 769 stats = updaterepo(repo, node, True)
770 770 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
771 771 if show_stats:
772 772 _showstats(repo, stats, quietempty)
773 773 return stats[3] > 0
774 774
775 775 # naming conflict in updatetotally()
776 776 _clean = clean
777 777
778 778 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
779 779 """Update the working directory with extra care for non-file components
780 780
781 781 This takes care of non-file components below:
782 782
783 783 :bookmark: might be advanced or (in)activated
784 784
785 785 This takes arguments below:
786 786
787 787 :checkout: to which revision the working directory is updated
788 788 :brev: a name, which might be a bookmark to be activated after updating
789 789 :clean: whether changes in the working directory can be discarded
790 790 :updatecheck: how to deal with a dirty working directory
791 791
792 792 Valid values for updatecheck are (None => linear):
793 793
794 794 * abort: abort if the working directory is dirty
795 795 * none: don't check (merge working directory changes into destination)
796 796 * linear: check that update is linear before merging working directory
797 797 changes into destination
798 798 * noconflict: check that the update does not result in file merges
799 799
800 800 This returns whether conflict is detected at updating or not.
801 801 """
802 802 if updatecheck is None:
803 803 updatecheck = ui.config('commands', 'update.check')
804 804 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
805 805 # If not configured, or invalid value configured
806 806 updatecheck = 'linear'
807 807 with repo.wlock():
808 808 movemarkfrom = None
809 809 warndest = False
810 810 if checkout is None:
811 811 updata = destutil.destupdate(repo, clean=clean)
812 812 checkout, movemarkfrom, brev = updata
813 813 warndest = True
814 814
815 815 if clean:
816 816 ret = _clean(repo, checkout)
817 817 else:
818 818 if updatecheck == 'abort':
819 819 cmdutil.bailifchanged(repo, merge=False)
820 820 updatecheck = 'none'
821 821 ret = _update(repo, checkout, updatecheck=updatecheck)
822 822
823 823 if not ret and movemarkfrom:
824 824 if movemarkfrom == repo['.'].node():
825 825 pass # no-op update
826 826 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
827 827 b = ui.label(repo._activebookmark, 'bookmarks.active')
828 828 ui.status(_("updating bookmark %s\n") % b)
829 829 else:
830 830 # this can happen with a non-linear update
831 831 b = ui.label(repo._activebookmark, 'bookmarks')
832 832 ui.status(_("(leaving bookmark %s)\n") % b)
833 833 bookmarks.deactivate(repo)
834 834 elif brev in repo._bookmarks:
835 835 if brev != repo._activebookmark:
836 836 b = ui.label(brev, 'bookmarks.active')
837 837 ui.status(_("(activating bookmark %s)\n") % b)
838 838 bookmarks.activate(repo, brev)
839 839 elif brev:
840 840 if repo._activebookmark:
841 841 b = ui.label(repo._activebookmark, 'bookmarks')
842 842 ui.status(_("(leaving bookmark %s)\n") % b)
843 843 bookmarks.deactivate(repo)
844 844
845 845 if warndest:
846 846 destutil.statusotherdests(ui, repo)
847 847
848 848 return ret
849 849
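A small standalone sketch of updatetotally's config normalization, under the assumption that only the four documented values are valid (illustrative only, not a Mercurial helper):

VALID = ('abort', 'none', 'linear', 'noconflict')

def effective_updatecheck(configured):
    """Map a commands.update.check value to the mode actually applied."""
    if configured not in VALID:
        # unset or invalid configuration degrades to 'linear'
        return 'linear'
    if configured == 'abort':
        # once the dirty-working-directory check has run, 'abort'
        # proceeds like 'none'
        return 'none'
    return configured

assert effective_updatecheck(None) == 'linear'
assert effective_updatecheck('bogus') == 'linear'
assert effective_updatecheck('noconflict') == 'noconflict'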
850 850 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
851 851 """Branch merge with node, resolving changes. Return true if any
852 852 unresolved conflicts."""
853 853 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
854 854 labels=labels)
855 855 _showstats(repo, stats)
856 856 if stats[3]:
857 857 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
858 858 "or 'hg update -C .' to abandon\n"))
859 859 elif remind:
860 860 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
861 861 return stats[3] > 0
862 862
863 863 def _incoming(displaychlist, subreporecurse, ui, repo, source,
864 864 opts, buffered=False):
865 865 """
866 866 Helper for incoming / gincoming.
867 867 displaychlist gets called with
868 868 (remoterepo, incomingchangesetlist, displayer) parameters,
869 869 and is supposed to contain only code that can't be unified.
870 870 """
871 871 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
872 872 other = peer(repo, opts, source)
873 873 ui.status(_('comparing with %s\n') % util.hidepassword(source))
874 874 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
875 875
876 876 if revs:
877 877 revs = [other.lookup(rev) for rev in revs]
878 878 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
879 879 revs, opts["bundle"], opts["force"])
880 880 try:
881 881 if not chlist:
882 882 ui.status(_("no changes found\n"))
883 883 return subreporecurse()
884 884 ui.pager('incoming')
885 885 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
886 886 displaychlist(other, chlist, displayer)
887 887 displayer.close()
888 888 finally:
889 889 cleanupfn()
890 890 subreporecurse()
891 891 return 0 # exit code is zero since we found incoming changes
892 892
893 893 def incoming(ui, repo, source, opts):
894 894 def subreporecurse():
895 895 ret = 1
896 896 if opts.get('subrepos'):
897 897 ctx = repo[None]
898 898 for subpath in sorted(ctx.substate):
899 899 sub = ctx.sub(subpath)
900 900 ret = min(ret, sub.incoming(ui, source, opts))
901 901 return ret
902 902
903 903 def display(other, chlist, displayer):
904 904 limit = cmdutil.loglimit(opts)
905 905 if opts.get('newest_first'):
906 906 chlist.reverse()
907 907 count = 0
908 908 for n in chlist:
909 909 if limit is not None and count >= limit:
910 910 break
911 911 parents = [p for p in other.changelog.parents(n) if p != nullid]
912 912 if opts.get('no_merges') and len(parents) == 2:
913 913 continue
914 914 count += 1
915 915 displayer.show(other[n])
916 916 return _incoming(display, subreporecurse, ui, repo, source, opts)
917 917
918 918 def _outgoing(ui, repo, dest, opts):
919 919 dest = ui.expandpath(dest or 'default-push', dest or 'default')
920 920 dest, branches = parseurl(dest, opts.get('branch'))
921 921 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
922 922 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
923 923 if revs:
924 924 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
925 925
926 926 other = peer(repo, opts, dest)
927 927 outgoing = discovery.findcommonoutgoing(repo, other, revs,
928 928 force=opts.get('force'))
929 929 o = outgoing.missing
930 930 if not o:
931 931 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
932 932 return o, other
933 933
934 934 def outgoing(ui, repo, dest, opts):
935 935 def recurse():
936 936 ret = 1
937 937 if opts.get('subrepos'):
938 938 ctx = repo[None]
939 939 for subpath in sorted(ctx.substate):
940 940 sub = ctx.sub(subpath)
941 941 ret = min(ret, sub.outgoing(ui, dest, opts))
942 942 return ret
943 943
944 944 limit = cmdutil.loglimit(opts)
945 945 o, other = _outgoing(ui, repo, dest, opts)
946 946 if not o:
947 947 cmdutil.outgoinghooks(ui, repo, other, opts, o)
948 948 return recurse()
949 949
950 950 if opts.get('newest_first'):
951 951 o.reverse()
952 952 ui.pager('outgoing')
953 953 displayer = cmdutil.show_changeset(ui, repo, opts)
954 954 count = 0
955 955 for n in o:
956 956 if limit is not None and count >= limit:
957 957 break
958 958 parents = [p for p in repo.changelog.parents(n) if p != nullid]
959 959 if opts.get('no_merges') and len(parents) == 2:
960 960 continue
961 961 count += 1
962 962 displayer.show(repo[n])
963 963 displayer.close()
964 964 cmdutil.outgoinghooks(ui, repo, other, opts, o)
965 965 recurse()
966 966 return 0 # exit code is zero since we found outgoing changes
967 967
968 968 def verify(repo):
969 969 """verify the consistency of a repository"""
970 970 ret = verifymod.verify(repo)
971 971
972 972 # Broken subrepo references in hidden csets don't seem worth worrying about,
973 973 # since they can't be pushed/pulled, and --hidden can be used if they are a
974 974 # concern.
975 975
976 976 # pathto() is needed for -R case
977 977 revs = repo.revs("filelog(%s)",
978 978 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
979 979
980 980 if revs:
981 981 repo.ui.status(_('checking subrepo links\n'))
982 982 for rev in revs:
983 983 ctx = repo[rev]
984 984 try:
985 985 for subpath in ctx.substate:
986 986 try:
987 987 ret = (ctx.sub(subpath, allowcreate=False).verify()
988 988 or ret)
989 989 except error.RepoError as e:
990 990 repo.ui.warn(('%s: %s\n') % (rev, e))
991 991 except Exception:
992 992 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
993 993 node.short(ctx.node()))
994 994
995 995 return ret
996 996
997 997 def remoteui(src, opts):
998 998 'build a remote ui from ui or repo and opts'
999 999 if util.safehasattr(src, 'baseui'): # looks like a repository
1000 1000 dst = src.baseui.copy() # drop repo-specific config
1001 1001 src = src.ui # copy target options from repo
1002 1002 else: # assume it's a global ui object
1003 1003 dst = src.copy() # keep all global options
1004 1004
1005 1005 # copy ssh-specific options
1006 1006 for o in 'ssh', 'remotecmd':
1007 1007 v = opts.get(o) or src.config('ui', o)
1008 1008 if v:
1009 1009 dst.setconfig("ui", o, v, 'copied')
1010 1010
1011 1011 # copy bundle-specific options
1012 1012 r = src.config('bundle', 'mainreporoot')
1013 1013 if r:
1014 1014 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1015 1015
1016 1016 # copy selected local settings to the remote ui
1017 1017 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1018 1018 for key, val in src.configitems(sect):
1019 1019 dst.setconfig(sect, key, val, 'copied')
1020 1020 v = src.config('web', 'cacerts')
1021 1021 if v:
1022 1022 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1023 1023
1024 1024 return dst
1025 1025
1026 1026 # Files of interest
1027 1027 # Used to check if the repository has changed looking at mtime and size of
1028 1028 # these files.
1029 1029 foi = [('spath', '00changelog.i'),
1030 1030 ('spath', 'phaseroots'), # ! phase can change content at the same size
1031 1031 ('spath', 'obsstore'),
1032 1032 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1033 1033 ]
1034 1034
1035 1035 class cachedlocalrepo(object):
1036 1036 """Holds a localrepository that can be cached and reused."""
1037 1037
1038 1038 def __init__(self, repo):
1039 1039 """Create a new cached repo from an existing repo.
1040 1040
1041 1041 We assume the passed in repo was recently created. If the
1042 1042 repo has changed between when it was created and when it was
1043 1043 turned into a cache, it may not refresh properly.
1044 1044 """
1045 1045 assert isinstance(repo, localrepo.localrepository)
1046 1046 self._repo = repo
1047 1047 self._state, self.mtime = self._repostate()
1048 1048 self._filtername = repo.filtername
1049 1049
1050 1050 def fetch(self):
1051 1051 """Refresh (if necessary) and return a repository.
1052 1052
1053 1053 If the cached instance is out of date, it will be recreated
1054 1054 automatically and returned.
1055 1055
1056 1056 Returns a tuple of the repo and a boolean indicating whether a new
1057 1057 repo instance was created.
1058 1058 """
1059 1059 # We compare the mtimes and sizes of some well-known files to
1060 1060 # determine if the repo changed. This is not precise, as mtimes
1061 1061 # are susceptible to clock skew and imprecise filesystems and
1062 1062 # file content can change while maintaining the same size.
1063 1063
1064 1064 state, mtime = self._repostate()
1065 1065 if state == self._state:
1066 1066 return self._repo, False
1067 1067
1068 1068 repo = repository(self._repo.baseui, self._repo.url())
1069 1069 if self._filtername:
1070 1070 self._repo = repo.filtered(self._filtername)
1071 1071 else:
1072 1072 self._repo = repo.unfiltered()
1073 1073 self._state = state
1074 1074 self.mtime = mtime
1075 1075
1076 1076 return self._repo, True
1077 1077
1078 1078 def _repostate(self):
1079 1079 state = []
1080 1080 maxmtime = -1
1081 1081 for attr, fname in foi:
1082 1082 prefix = getattr(self._repo, attr)
1083 1083 p = os.path.join(prefix, fname)
1084 1084 try:
1085 1085 st = os.stat(p)
1086 1086 except OSError:
1087 1087 st = os.stat(prefix)
1088 1088 state.append((st.st_mtime, st.st_size))
1089 1089 maxmtime = max(maxmtime, st.st_mtime)
1090 1090
1091 1091 return tuple(state), maxmtime
1092 1092
1093 1093 def copy(self):
1094 1094 """Obtain a copy of this class instance.
1095 1095
1096 1096 A new localrepository instance is obtained. The new instance should be
1097 1097 completely independent of the original.
1098 1098 """
1099 1099 repo = repository(self._repo.baseui, self._repo.origroot)
1100 1100 if self._filtername:
1101 1101 repo = repo.filtered(self._filtername)
1102 1102 else:
1103 1103 repo = repo.unfiltered()
1104 1104 c = cachedlocalrepo(repo)
1105 1105 c._state = self._state
1106 1106 c.mtime = self.mtime
1107 1107 return c
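cachedlocalrepo's staleness check boils down to a cheap fingerprint: the (mtime, size) of the few files of interest listed in foi. A self-contained sketch of that idea; the watched paths here are illustrative, and the real code stats the containing directory when a file is missing:

import os

WATCHED = ['.hg/store/00changelog.i', '.hg/store/phaseroots',
           '.hg/store/obsstore', '.hg/bookmarks']  # illustrative paths

def repostate(paths):
    """Snapshot (mtime, size) for each watched file."""
    state = []
    for p in paths:
        try:
            st = os.stat(p)
            state.append((st.st_mtime, st.st_size))
        except OSError:
            # placeholder for a missing file; the real code stats the
            # parent directory instead
            state.append((0, 0))
    return tuple(state)

old = repostate(WATCHED)
# ... later: the cached repo is stale iff the fingerprint moved ...
stale = repostate(WATCHED) != old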
@@ -1,118 +1,118
1 # remotenames.py
1 # logexchange.py
2 2 #
3 3 # Copyright 2017 Augie Fackler <raf@durin42.com>
4 4 # Copyright 2017 Sean Farley <sean@farley.io>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 from .node import hex
12 12
13 13 from . import (
14 14 vfs as vfsmod,
15 15 )
16 16
17 17 # directory name in .hg/ in which remotenames files will be present
18 remotenamedir = 'remotenames'
18 remotenamedir = 'logexchange'
19 19
20 20 def readremotenamefile(repo, filename):
21 21 """
22 reads a file from .hg/remotenames/ directory and yields its content
22 reads a file from .hg/logexchange/ directory and yields its content
23 23 filename: the file to be read
24 24 yields a tuple (node, remotepath, name)
25 25 """
26 26
27 27 vfs = vfsmod.vfs(repo.vfs.join(remotenamedir))
28 28 if not vfs.exists(filename):
29 29 return
30 30 f = vfs(filename)
31 31 lineno = 0
32 32 for line in f:
33 33 line = line.strip()
34 34 if not line:
35 35 continue
36 36 # contains the version number
37 37 if lineno == 0:
38 38 lineno += 1
39 39 try:
40 40 node, remote, rname = line.split('\0')
41 41 yield node, remote, rname
42 42 except ValueError:
43 43 pass
44 44
45 45 f.close()
46 46
47 47 def readremotenames(repo):
48 48 """
49 read the details about the remotenames stored in .hg/remotenames/ and
49 read the details about the remotenames stored in .hg/logexchange/ and
50 50 yields a tuple (node, remotepath, name). It does not yield information
51 51 about whether a yielded entry is a branch or a bookmark. To get that
52 52 information, call the respective functions.
53 53 """
54 54
55 55 for bmentry in readremotenamefile(repo, 'bookmarks'):
56 56 yield bmentry
57 57 for branchentry in readremotenamefile(repo, 'branches'):
58 58 yield branchentry
59 59
60 60 def writeremotenamefile(repo, remotepath, names, nametype):
61 61 vfs = vfsmod.vfs(repo.vfs.join(remotenamedir))
62 62 f = vfs(nametype, 'w', atomictemp=True)
63 63 # write the storage version info on top of file
64 64 # version '0' represents the very initial version of the storage format
65 65 f.write('0\n\n')
66 66
67 67 olddata = set(readremotenamefile(repo, nametype))
68 68 # re-save the data from a different remote than this one.
69 69 for node, oldpath, rname in sorted(olddata):
70 70 if oldpath != remotepath:
71 71 f.write('%s\0%s\0%s\n' % (node, oldpath, rname))
72 72
73 73 for name, node in sorted(names.iteritems()):
74 74 if nametype == "branches":
75 75 for n in node:
76 76 f.write('%s\0%s\0%s\n' % (n, remotepath, name))
77 77 elif nametype == "bookmarks":
78 78 if node:
79 79 f.write('%s\0%s\0%s\n' % (node, remotepath, name))
80 80
81 81 f.close()
82 82
83 83 def saveremotenames(repo, remotepath, branches=None, bookmarks=None):
84 84 """
85 85 save remotenames, i.e. remotebookmarks and remotebranches, in their
86 respective files under ".hg/remotenames/" directory.
86 respective files under ".hg/logexchange/" directory.
87 87 """
88 88 wlock = repo.wlock()
89 89 try:
90 90 if bookmarks:
91 91 writeremotenamefile(repo, remotepath, bookmarks, 'bookmarks')
92 92 if branches:
93 93 writeremotenamefile(repo, remotepath, branches, 'branches')
94 94 finally:
95 95 wlock.release()
96 96
97 97 def pullremotenames(localrepo, remoterepo):
98 98 """
99 99 pulls bookmarks and branches information of the remote repo during a
100 100 pull or clone operation.
101 101 localrepo is our local repository
102 102 remoterepo is the peer instance
103 103 """
104 104 remotepath = remoterepo.url()
105 105 bookmarks = remoterepo.listkeys('bookmarks')
106 106 # on a push, we don't want to keep obsolete heads since
107 107 # they won't show up as heads on the next pull, so we
108 108 # remove them here otherwise we would require the user
109 109 # to issue a pull to refresh the storage
110 110 bmap = {}
111 111 repo = localrepo.unfiltered()
112 112 for branch, nodes in remoterepo.branchmap().iteritems():
113 113 bmap[branch] = []
114 114 for node in nodes:
115 115 if node in repo and not repo[node].obsolete():
116 116 bmap[branch].append(hex(node))
117 117
118 118 saveremotenames(localrepo, remotepath, bmap, bookmarks)
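The on-disk format written above, and inspected with cat in the test that follows, is a version line, a blank line, and NUL-separated node/remotepath/name triples. A minimal reader sketch; it checks the version explicitly, which is slightly stricter than readremotenamefile, which simply skips unparseable lines:

def parse_logexchange(data):
    """Yield (node, remotepath, name) triples from file content."""
    lines = data.splitlines()
    # the first line carries the storage format version; '0' is initial
    assert lines and lines[0].strip() == '0'
    for line in lines[1:]:
        line = line.strip()
        if not line:
            continue
        try:
            node, remote, name = line.split('\0')
        except ValueError:
            continue  # tolerate malformed entries
        yield node, remote, name

sample = '0\n\n%s\x00file:/tmp/server\x00bar\n' % ('f' * 40)
assert list(parse_logexchange(sample)) == [
    ('f' * 40, 'file:/tmp/server', 'bar')]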
@@ -1,108 +1,108
1 1 Testing the functionality to pull remotenames
2 2 =============================================
3 3
4 4 $ cat >> $HGRCPATH << EOF
5 5 > [alias]
6 6 > glog = log -G -T '{rev}:{node|short} {desc}'
7 7 > [experimental]
8 8 > remotenames = True
9 9 > EOF
10 10
11 11 Making a server repo
12 12 --------------------
13 13
14 14 $ hg init server
15 15 $ cd server
16 16 $ for ch in a b c d e f g h; do
17 17 > echo "foo" >> $ch
18 18 > hg ci -Aqm "Added "$ch
19 19 > done
20 20 $ hg glog
21 21 @ 7:ec2426147f0e Added h
22 22 |
23 23 o 6:87d6d6676308 Added g
24 24 |
25 25 o 5:825660c69f0c Added f
26 26 |
27 27 o 4:aa98ab95a928 Added e
28 28 |
29 29 o 3:62615734edd5 Added d
30 30 |
31 31 o 2:28ad74487de9 Added c
32 32 |
33 33 o 1:29becc82797a Added b
34 34 |
35 35 o 0:18d04c59bb5d Added a
36 36
37 37 $ hg bookmark -r 3 foo
38 38 $ hg bookmark -r 6 bar
39 39 $ hg up 4
40 40 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
41 41 $ hg branch wat
42 42 marked working directory as branch wat
43 43 (branches are permanent and global, did you want a bookmark?)
44 44 $ echo foo >> bar
45 45 $ hg ci -Aqm "added bar"
46 46
47 47 Making a client repo
48 48 --------------------
49 49
50 50 $ cd ..
51 51
52 52 $ hg clone server client
53 53 updating to branch default
54 54 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 55
56 56 $ cd client
57 $ cat .hg/remotenames/bookmarks
57 $ cat .hg/logexchange/bookmarks
58 58 0
59 59
60 60 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc)
61 61 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc)
62 62
63 $ cat .hg/remotenames/branches
63 $ cat .hg/logexchange/branches
64 64 0
65 65
66 66 ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc)
67 67 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc)
68 68
69 69 Making a new server
70 70 -------------------
71 71
72 72 $ cd ..
73 73 $ hg init server2
74 74 $ cd server2
75 75 $ hg pull ../server/
76 76 pulling from ../server/
77 77 requesting all changes
78 78 adding changesets
79 79 adding manifests
80 80 adding file changes
81 81 added 9 changesets with 9 changes to 9 files (+1 heads)
82 82 adding remote bookmark bar
83 83 adding remote bookmark foo
84 84 new changesets 18d04c59bb5d:3e1487808078
85 85 (run 'hg heads' to see heads)
86 86
87 87 Pulling from the new server
88 88 ---------------------------
89 89 $ cd ../client/
90 90 $ hg pull ../server2/
91 91 pulling from ../server2/
92 92 searching for changes
93 93 no changes found
94 $ cat .hg/remotenames/bookmarks
94 $ cat .hg/logexchange/bookmarks
95 95 0
96 96
97 97 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc)
98 98 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc)
99 99 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server2\x00bar (esc)
100 100 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server2\x00foo (esc)
101 101
102 $ cat .hg/remotenames/branches
102 $ cat .hg/logexchange/branches
103 103 0
104 104
105 105 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc)
106 106 ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc)
107 107 ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server2\x00default (esc)
108 108 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server2\x00wat (esc)