cache: avoid warming the fnodetags cache after clone...
marmoute
r47985:8be95673 stable
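The only functional change in this diff sits at the end of clone() in mercurial/hg.py: the final cache-warming call destrepo.updatecaches(full=True) becomes destrepo.updatecaches(full=b"post-clone"), so the warming pass run right after a clone can skip the fnodetags cache named in the subject line. The sketch below shows one way a string-valued mode can gate individual caches; it is a hypothetical illustration, not the real localrepo.updatecaches() (which this diff does not show):

    # Hypothetical sketch of a mode-gated cache warmer; the helper bodies are
    # placeholders, not Mercurial code.
    def updatecaches(repo, full=False):
        def warm_branchmap(repo):
            pass  # cheap cache, refreshed on every full run

        def warm_tags_fnodes_cache(repo):
            pass  # expensive, and of little use right after a fresh clone

        warmers = [warm_branchmap]
        if full and full != b"post-clone":
            # plain full=True keeps the old behavior and warms everything
            warmers.append(warm_tags_fnodes_cache)
        for warm in warmers:
            warm(repo)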
mercurial/hg.py
@@ -1,1596 +1,1596 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 nullhex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from .pycompat import getattr
24 24
25 25 from . import (
26 26 bookmarks,
27 27 bundlerepo,
28 28 cacheutil,
29 29 cmdutil,
30 30 destutil,
31 31 discovery,
32 32 error,
33 33 exchange,
34 34 extensions,
35 35 graphmod,
36 36 httppeer,
37 37 localrepo,
38 38 lock,
39 39 logcmdutil,
40 40 logexchange,
41 41 merge as mergemod,
42 42 mergestate as mergestatemod,
43 43 narrowspec,
44 44 phases,
45 45 requirements,
46 46 scmutil,
47 47 sshpeer,
48 48 statichttprepo,
49 49 ui as uimod,
50 50 unionrepo,
51 51 url,
52 52 util,
53 53 verify as verifymod,
54 54 vfs as vfsmod,
55 55 )
56 56 from .utils import (
57 57 hashutil,
58 58 stringutil,
59 59 urlutil,
60 60 )
61 61
62 62
63 63 release = lock.release
64 64
65 65 # shared features
66 66 sharedbookmarks = b'bookmarks'
67 67
68 68
69 69 def _local(path):
70 70 path = util.expandpath(urlutil.urllocalpath(path))
71 71
72 72 try:
73 73 # we use os.stat() directly here instead of os.path.isfile()
74 74 # because the latter started swallowing the exceptions raised for
75 75 # invalid paths and returning `False` in Python 3.8, and we care
76 76 # about handling invalid paths specially here.
77 77 st = os.stat(path)
78 78 isfile = stat.S_ISREG(st.st_mode)
79 79 # Python 2 raises TypeError, Python 3 ValueError.
80 80 except (TypeError, ValueError) as e:
81 81 raise error.Abort(
82 82 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
83 83 )
84 84 except OSError:
85 85 isfile = False
86 86
87 87 return isfile and bundlerepo or localrepo
88 88
89 89
90 90 def addbranchrevs(lrepo, other, branches, revs):
91 91 peer = other.peer() # a courtesy to callers using a localrepo for other
92 92 hashbranch, branches = branches
93 93 if not hashbranch and not branches:
94 94 x = revs or None
95 95 if revs:
96 96 y = revs[0]
97 97 else:
98 98 y = None
99 99 return x, y
100 100 if revs:
101 101 revs = list(revs)
102 102 else:
103 103 revs = []
104 104
105 105 if not peer.capable(b'branchmap'):
106 106 if branches:
107 107 raise error.Abort(_(b"remote branch lookup not supported"))
108 108 revs.append(hashbranch)
109 109 return revs, revs[0]
110 110
111 111 with peer.commandexecutor() as e:
112 112 branchmap = e.callcommand(b'branchmap', {}).result()
113 113
114 114 def primary(branch):
115 115 if branch == b'.':
116 116 if not lrepo:
117 117 raise error.Abort(_(b"dirstate branch not accessible"))
118 118 branch = lrepo.dirstate.branch()
119 119 if branch in branchmap:
120 120 revs.extend(hex(r) for r in reversed(branchmap[branch]))
121 121 return True
122 122 else:
123 123 return False
124 124
125 125 for branch in branches:
126 126 if not primary(branch):
127 127 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
128 128 if hashbranch:
129 129 if not primary(hashbranch):
130 130 revs.append(hashbranch)
131 131 return revs, revs[0]
132 132
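# Illustrative example of the contract above (the branch name is
# hypothetical): addbranchrevs(repo, peer, (None, [b'stable']), None)
# resolves b'stable' through the remote's branchmap, extends revs with that
# branch's heads, and returns (revs, revs[0]), where revs[0] serves as the
# default checkout revision.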
133 133
134 134 def parseurl(path, branches=None):
135 135 '''parse url#branch, returning (url, (branch, branches))'''
136 136 msg = b'parseurl(...) moved to mercurial.utils.urlutil'
137 137 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
138 138 return urlutil.parseurl(path, branches=branches)
139 139
140 140
141 141 schemes = {
142 142 b'bundle': bundlerepo,
143 143 b'union': unionrepo,
144 144 b'file': _local,
145 145 b'http': httppeer,
146 146 b'https': httppeer,
147 147 b'ssh': sshpeer,
148 148 b'static-http': statichttprepo,
149 149 }
150 150
151 151
152 152 def _peerlookup(path):
153 153 u = urlutil.url(path)
154 154 scheme = u.scheme or b'file'
155 155 thing = schemes.get(scheme) or schemes[b'file']
156 156 try:
157 157 return thing(path)
158 158 except TypeError:
159 159 # we can't test callable(thing) because 'thing' can be an unloaded
160 160 # module that implements __call__
161 161 if not util.safehasattr(thing, b'instance'):
162 162 raise
163 163 return thing
164 164
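# Illustrative: _peerlookup(b'ssh://host/repo') maps the b'ssh' scheme to the
# sshpeer module via the ``schemes`` table above, while a plain filesystem
# path has no scheme and falls back to schemes[b'file'], i.e. _local.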
165 165
166 166 def islocal(repo):
167 167 '''return true if repo (or path pointing to repo) is local'''
168 168 if isinstance(repo, bytes):
169 169 try:
170 170 return _peerlookup(repo).islocal(repo)
171 171 except AttributeError:
172 172 return False
173 173 return repo.local()
174 174
175 175
176 176 def openpath(ui, path, sendaccept=True):
177 177 '''open path with open if local, url.open if remote'''
178 178 pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
179 179 if pathurl.islocal():
180 180 return util.posixfile(pathurl.localpath(), b'rb')
181 181 else:
182 182 return url.open(ui, path, sendaccept=sendaccept)
183 183
184 184
185 185 # a list of (ui, repo) functions called for wire peer initialization
186 186 wirepeersetupfuncs = []
187 187
188 188
189 189 def _peerorrepo(
190 190 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
191 191 ):
192 192 """return a repository object for the specified path"""
193 193 obj = _peerlookup(path).instance(
194 194 ui, path, create, intents=intents, createopts=createopts
195 195 )
196 196 ui = getattr(obj, "ui", ui)
197 197 for f in presetupfuncs or []:
198 198 f(ui, obj)
199 199 ui.log(b'extension', b'- executing reposetup hooks\n')
200 200 with util.timedcm('all reposetup') as allreposetupstats:
201 201 for name, module in extensions.extensions(ui):
202 202 ui.log(b'extension', b' - running reposetup for %s\n', name)
203 203 hook = getattr(module, 'reposetup', None)
204 204 if hook:
205 205 with util.timedcm('reposetup %r', name) as stats:
206 206 hook(ui, obj)
207 207 ui.log(
208 208 b'extension', b' > reposetup for %s took %s\n', name, stats
209 209 )
210 210 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
211 211 if not obj.local():
212 212 for f in wirepeersetupfuncs:
213 213 f(ui, obj)
214 214 return obj
215 215
216 216
217 217 def repository(
218 218 ui,
219 219 path=b'',
220 220 create=False,
221 221 presetupfuncs=None,
222 222 intents=None,
223 223 createopts=None,
224 224 ):
225 225 """return a repository object for the specified path"""
226 226 peer = _peerorrepo(
227 227 ui,
228 228 path,
229 229 create,
230 230 presetupfuncs=presetupfuncs,
231 231 intents=intents,
232 232 createopts=createopts,
233 233 )
234 234 repo = peer.local()
235 235 if not repo:
236 236 raise error.Abort(
237 237 _(b"repository '%s' is not local") % (path or peer.url())
238 238 )
239 239 return repo.filtered(b'visible')
240 240
241 241
242 242 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
243 243 '''return a repository peer for the specified path'''
244 244 rui = remoteui(uiorrepo, opts)
245 245 return _peerorrepo(
246 246 rui, path, create, intents=intents, createopts=createopts
247 247 ).peer()
248 248
249 249
250 250 def defaultdest(source):
251 251 """return default destination of clone if none is given
252 252
253 253 >>> defaultdest(b'foo')
254 254 'foo'
255 255 >>> defaultdest(b'/foo/bar')
256 256 'bar'
257 257 >>> defaultdest(b'/')
258 258 ''
259 259 >>> defaultdest(b'')
260 260 ''
261 261 >>> defaultdest(b'http://example.org/')
262 262 ''
263 263 >>> defaultdest(b'http://example.org/foo/')
264 264 'foo'
265 265 """
266 266 path = urlutil.url(source).path
267 267 if not path:
268 268 return b''
269 269 return os.path.basename(os.path.normpath(path))
270 270
271 271
272 272 def sharedreposource(repo):
273 273 """Returns repository object for source repository of a shared repo.
274 274
275 275 If repo is not a shared repository, returns None.
276 276 """
277 277 if repo.sharedpath == repo.path:
278 278 return None
279 279
280 280 if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
281 281 return repo.srcrepo
282 282
283 283 # the sharedpath always ends in the .hg; we want the path to the repo
284 284 source = repo.vfs.split(repo.sharedpath)[0]
285 285 srcurl, branches = urlutil.parseurl(source)
286 286 srcrepo = repository(repo.ui, srcurl)
287 287 repo.srcrepo = srcrepo
288 288 return srcrepo
289 289
290 290
291 291 def share(
292 292 ui,
293 293 source,
294 294 dest=None,
295 295 update=True,
296 296 bookmarks=True,
297 297 defaultpath=None,
298 298 relative=False,
299 299 ):
300 300 '''create a shared repository'''
301 301
302 302 if not islocal(source):
303 303 raise error.Abort(_(b'can only share local repositories'))
304 304
305 305 if not dest:
306 306 dest = defaultdest(source)
307 307 else:
308 308 dest = urlutil.get_clone_path(ui, dest)[1]
309 309
310 310 if isinstance(source, bytes):
311 311 origsource, source, branches = urlutil.get_clone_path(ui, source)
312 312 srcrepo = repository(ui, source)
313 313 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
314 314 else:
315 315 srcrepo = source.local()
316 316 checkout = None
317 317
318 318 shareditems = set()
319 319 if bookmarks:
320 320 shareditems.add(sharedbookmarks)
321 321
322 322 r = repository(
323 323 ui,
324 324 dest,
325 325 create=True,
326 326 createopts={
327 327 b'sharedrepo': srcrepo,
328 328 b'sharedrelative': relative,
329 329 b'shareditems': shareditems,
330 330 },
331 331 )
332 332
333 333 postshare(srcrepo, r, defaultpath=defaultpath)
334 334 r = repository(ui, dest)
335 335 _postshareupdate(r, update, checkout=checkout)
336 336 return r
337 337
338 338
339 339 def _prependsourcehgrc(repo):
340 340 """copies the source repo config and prepend it in current repo .hg/hgrc
341 341 on unshare. This is only done if the share was perfomed using share safe
342 342 method where we share config of source in shares"""
343 343 srcvfs = vfsmod.vfs(repo.sharedpath)
344 344 dstvfs = vfsmod.vfs(repo.path)
345 345
346 346 if not srcvfs.exists(b'hgrc'):
347 347 return
348 348
349 349 currentconfig = b''
350 350 if dstvfs.exists(b'hgrc'):
351 351 currentconfig = dstvfs.read(b'hgrc')
352 352
353 353 with dstvfs(b'hgrc', b'wb') as fp:
354 354 sourceconfig = srcvfs.read(b'hgrc')
355 355 fp.write(b"# Config copied from shared source\n")
356 356 fp.write(sourceconfig)
357 357 fp.write(b'\n')
358 358 fp.write(currentconfig)
359 359
360 360
361 361 def unshare(ui, repo):
362 362 """convert a shared repository to a normal one
363 363
364 364 Copy the store data to the repo and remove the sharedpath data.
365 365
366 366 Returns a new repository object representing the unshared repository.
367 367
368 368 The passed repository object is not usable after this function is
369 369 called.
370 370 """
371 371
372 372 with repo.lock():
373 373 # we use locks here because if we race with commit, we
374 374 # can end up with extra data in the cloned revlogs that's
375 375 # not pointed to by changesets, thus causing verify to
376 376 # fail
377 377 destlock = copystore(ui, repo, repo.path)
378 378 with destlock or util.nullcontextmanager():
379 379 if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
380 380 # we were sharing .hg/hgrc of the share source with the current
381 381 # repo. We need to copy that while unsharing otherwise it can
382 382 # disable hooks and other checks
383 383 _prependsourcehgrc(repo)
384 384
385 385 sharefile = repo.vfs.join(b'sharedpath')
386 386 util.rename(sharefile, sharefile + b'.old')
387 387
388 388 repo.requirements.discard(requirements.SHARED_REQUIREMENT)
389 389 repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
390 390 scmutil.writereporequirements(repo)
391 391
392 392 # Removing share changes some fundamental properties of the repo instance.
393 393 # So we instantiate a new repo object and operate on it rather than
394 394 # try to keep the existing repo usable.
395 395 newrepo = repository(repo.baseui, repo.root, create=False)
396 396
397 397 # TODO: figure out how to access subrepos that exist, but were previously
398 398 # removed from .hgsub
399 399 c = newrepo[b'.']
400 400 subs = c.substate
401 401 for s in sorted(subs):
402 402 c.sub(s).unshare()
403 403
404 404 localrepo.poisonrepository(repo)
405 405
406 406 return newrepo
407 407
408 408
409 409 def postshare(sourcerepo, destrepo, defaultpath=None):
410 410 """Called after a new shared repo is created.
411 411
412 412 The new repo only has a requirements file and pointer to the source.
413 413 This function configures additional shared data.
414 414
415 415 Extensions can wrap this function and write additional entries to
416 416 destrepo/.hg/shared to indicate additional pieces of data to be shared.
417 417 """
418 418 default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
419 419 if default:
420 420 template = b'[paths]\ndefault = %s\n'
421 421 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
422 422 if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
423 423 with destrepo.wlock():
424 424 narrowspec.copytoworkingcopy(destrepo)
425 425
426 426
427 427 def _postshareupdate(repo, update, checkout=None):
428 428 """Maybe perform a working directory update after a shared repo is created.
429 429
430 430 ``update`` can be a boolean or a revision to update to.
431 431 """
432 432 if not update:
433 433 return
434 434
435 435 repo.ui.status(_(b"updating working directory\n"))
436 436 if update is not True:
437 437 checkout = update
438 438 for test in (checkout, b'default', b'tip'):
439 439 if test is None:
440 440 continue
441 441 try:
442 442 uprev = repo.lookup(test)
443 443 break
444 444 except error.RepoLookupError:
445 445 continue
446 446 _update(repo, uprev)
447 447
448 448
449 449 def copystore(ui, srcrepo, destpath):
450 450 """copy files from store of srcrepo in destpath
451 451
452 452 returns destlock
453 453 """
454 454 destlock = None
455 455 try:
456 456 hardlink = None
457 457 topic = _(b'linking') if hardlink else _(b'copying')
458 458 with ui.makeprogress(topic, unit=_(b'files')) as progress:
459 459 num = 0
460 460 srcpublishing = srcrepo.publishing()
461 461 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
462 462 dstvfs = vfsmod.vfs(destpath)
463 463 for f in srcrepo.store.copylist():
464 464 if srcpublishing and f.endswith(b'phaseroots'):
465 465 continue
466 466 dstbase = os.path.dirname(f)
467 467 if dstbase and not dstvfs.exists(dstbase):
468 468 dstvfs.mkdir(dstbase)
469 469 if srcvfs.exists(f):
470 470 if f.endswith(b'data'):
471 471 # 'dstbase' may be empty (e.g. revlog format 0)
472 472 lockfile = os.path.join(dstbase, b"lock")
473 473 # lock to avoid premature writing to the target
474 474 destlock = lock.lock(dstvfs, lockfile)
475 475 hardlink, n = util.copyfiles(
476 476 srcvfs.join(f), dstvfs.join(f), hardlink, progress
477 477 )
478 478 num += n
479 479 if hardlink:
480 480 ui.debug(b"linked %d files\n" % num)
481 481 else:
482 482 ui.debug(b"copied %d files\n" % num)
483 483 return destlock
484 484 except: # re-raises
485 485 release(destlock)
486 486 raise
487 487
488 488
489 489 def clonewithshare(
490 490 ui,
491 491 peeropts,
492 492 sharepath,
493 493 source,
494 494 srcpeer,
495 495 dest,
496 496 pull=False,
497 497 rev=None,
498 498 update=True,
499 499 stream=False,
500 500 ):
501 501 """Perform a clone using a shared repo.
502 502
503 503 The store for the repository will be located at <sharepath>/.hg. The
504 504 specified revisions will be cloned or pulled from "source". A shared repo
505 505 will be created at "dest" and a working copy will be created if "update" is
506 506 True.
507 507 """
508 508 revs = None
509 509 if rev:
510 510 if not srcpeer.capable(b'lookup'):
511 511 raise error.Abort(
512 512 _(
513 513 b"src repository does not support "
514 514 b"revision lookup and so doesn't "
515 515 b"support clone by revision"
516 516 )
517 517 )
518 518
519 519 # TODO this is batchable.
520 520 remoterevs = []
521 521 for r in rev:
522 522 with srcpeer.commandexecutor() as e:
523 523 remoterevs.append(
524 524 e.callcommand(
525 525 b'lookup',
526 526 {
527 527 b'key': r,
528 528 },
529 529 ).result()
530 530 )
531 531 revs = remoterevs
532 532
533 533 # Obtain a lock before checking for or cloning the pooled repo otherwise
534 534 # 2 clients may race creating or populating it.
535 535 pooldir = os.path.dirname(sharepath)
536 536 # lock class requires the directory to exist.
537 537 try:
538 538 util.makedir(pooldir, False)
539 539 except OSError as e:
540 540 if e.errno != errno.EEXIST:
541 541 raise
542 542
543 543 poolvfs = vfsmod.vfs(pooldir)
544 544 basename = os.path.basename(sharepath)
545 545
546 546 with lock.lock(poolvfs, b'%s.lock' % basename):
547 547 if os.path.exists(sharepath):
548 548 ui.status(
549 549 _(b'(sharing from existing pooled repository %s)\n') % basename
550 550 )
551 551 else:
552 552 ui.status(
553 553 _(b'(sharing from new pooled repository %s)\n') % basename
554 554 )
555 555 # Always use pull mode because hardlinks in share mode don't work
556 556 # well. Never update because working copies aren't necessary in
557 557 # share mode.
558 558 clone(
559 559 ui,
560 560 peeropts,
561 561 source,
562 562 dest=sharepath,
563 563 pull=True,
564 564 revs=rev,
565 565 update=False,
566 566 stream=stream,
567 567 )
568 568
569 569 # Resolve the value to put in [paths] section for the source.
570 570 if islocal(source):
571 571 defaultpath = os.path.abspath(urlutil.urllocalpath(source))
572 572 else:
573 573 defaultpath = source
574 574
575 575 sharerepo = repository(ui, path=sharepath)
576 576 destrepo = share(
577 577 ui,
578 578 sharerepo,
579 579 dest=dest,
580 580 update=False,
581 581 bookmarks=False,
582 582 defaultpath=defaultpath,
583 583 )
584 584
585 585 # We need to perform a pull against the dest repo to fetch bookmarks
586 586 # and other non-store data that isn't shared by default. In the case of
587 587 # a non-existent shared repo, this means we pull from the remote twice. This
588 588 # is a bit weird. But at the time it was implemented, there wasn't an easy
589 589 # way to pull just non-changegroup data.
590 590 exchange.pull(destrepo, srcpeer, heads=revs)
591 591
592 592 _postshareupdate(destrepo, update)
593 593
594 594 return srcpeer, peer(ui, peeropts, dest)
595 595
596 596
597 597 # Recomputing caches is often slow on big repos, so copy them.
598 598 def _copycache(srcrepo, dstcachedir, fname):
599 599 """copy a cache from srcrepo to destcachedir (if it exists)"""
600 600 srcfname = srcrepo.cachevfs.join(fname)
601 601 dstfname = os.path.join(dstcachedir, fname)
602 602 if os.path.exists(srcfname):
603 603 if not os.path.exists(dstcachedir):
604 604 os.mkdir(dstcachedir)
605 605 util.copyfile(srcfname, dstfname)
606 606
607 607
608 608 def clone(
609 609 ui,
610 610 peeropts,
611 611 source,
612 612 dest=None,
613 613 pull=False,
614 614 revs=None,
615 615 update=True,
616 616 stream=False,
617 617 branch=None,
618 618 shareopts=None,
619 619 storeincludepats=None,
620 620 storeexcludepats=None,
621 621 depth=None,
622 622 ):
623 623 """Make a copy of an existing repository.
624 624
625 625 Create a copy of an existing repository in a new directory. The
626 626 source and destination are URLs, as passed to the repository
627 627 function. Returns a pair of repository peers, the source and
628 628 newly created destination.
629 629
630 630 The location of the source is added to the new repository's
631 631 .hg/hgrc file, as the default to be used for future pulls and
632 632 pushes.
633 633
634 634 If an exception is raised, the partly cloned/updated destination
635 635 repository will be deleted.
636 636
637 637 Arguments:
638 638
639 639 source: repository object or URL
640 640
641 641 dest: URL of destination repository to create (defaults to base
642 642 name of source repository)
643 643
644 644 pull: always pull from source repository, even in local case or if the
645 645 server prefers streaming
646 646
647 647 stream: stream raw data uncompressed from repository (fast over
648 648 LAN, slow over WAN)
649 649
650 650 revs: revision to clone up to (implies pull=True)
651 651
652 652 update: update working directory after clone completes, if
653 653 destination is local repository (True means update to default rev,
654 654 anything else is treated as a revision)
655 655
656 656 branch: branches to clone
657 657
658 658 shareopts: dict of options to control auto sharing behavior. The "pool" key
659 659 activates auto sharing mode and defines the directory for stores. The
660 660 "mode" key determines how to construct the directory name of the shared
661 661 repository. "identity" means the name is derived from the node of the first
662 662 changeset in the repository. "remote" means the name is derived from the
663 663 remote's path/URL. Defaults to "identity."
664 664
665 665 storeincludepats and storeexcludepats: sets of file patterns to include and
666 666 exclude in the repository copy, respectively. If not defined, all files
667 667 will be included (a "full" clone). Otherwise a "narrow" clone containing
668 668 only the requested files will be performed. If ``storeincludepats`` is not
669 669 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
670 670 ``path:.``. If both are empty sets, no files will be cloned.
671 671 """
672 672
673 673 if isinstance(source, bytes):
674 674 src = urlutil.get_clone_path(ui, source, branch)
675 675 origsource, source, branches = src
676 676 srcpeer = peer(ui, peeropts, source)
677 677 else:
678 678 srcpeer = source.peer() # in case we were called with a localrepo
679 679 branches = (None, branch or [])
680 680 origsource = source = srcpeer.url()
681 681 srclock = destlock = cleandir = None
682 682 destpeer = None
683 683 try:
684 684 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
685 685
686 686 if dest is None:
687 687 dest = defaultdest(source)
688 688 if dest:
689 689 ui.status(_(b"destination directory: %s\n") % dest)
690 690 else:
691 691 dest = urlutil.get_clone_path(ui, dest)[0]
692 692
693 693 dest = urlutil.urllocalpath(dest)
694 694 source = urlutil.urllocalpath(source)
695 695
696 696 if not dest:
697 697 raise error.InputError(_(b"empty destination path is not valid"))
698 698
699 699 destvfs = vfsmod.vfs(dest, expandpath=True)
700 700 if destvfs.lexists():
701 701 if not destvfs.isdir():
702 702 raise error.InputError(
703 703 _(b"destination '%s' already exists") % dest
704 704 )
705 705 elif destvfs.listdir():
706 706 raise error.InputError(
707 707 _(b"destination '%s' is not empty") % dest
708 708 )
709 709
710 710 createopts = {}
711 711 narrow = False
712 712
713 713 if storeincludepats is not None:
714 714 narrowspec.validatepatterns(storeincludepats)
715 715 narrow = True
716 716
717 717 if storeexcludepats is not None:
718 718 narrowspec.validatepatterns(storeexcludepats)
719 719 narrow = True
720 720
721 721 if narrow:
722 722 # Include everything by default if only exclusion patterns defined.
723 723 if storeexcludepats and not storeincludepats:
724 724 storeincludepats = {b'path:.'}
725 725
726 726 createopts[b'narrowfiles'] = True
727 727
728 728 if depth:
729 729 createopts[b'shallowfilestore'] = True
730 730
731 731 if srcpeer.capable(b'lfs-serve'):
732 732 # Repository creation honors the config if it disabled the extension, so
733 733 # we can't just announce that lfs will be enabled. This check avoids
734 734 # saying that lfs will be enabled, and then saying it's an unknown
735 735 # feature. The lfs creation option is set in either case so that a
736 736 # requirement is added. If the extension is explicitly disabled but the
737 737 # requirement is set, the clone aborts early, before transferring any
738 738 # data.
739 739 createopts[b'lfs'] = True
740 740
741 741 if extensions.disabled_help(b'lfs'):
742 742 ui.status(
743 743 _(
744 744 b'(remote is using large file support (lfs), but it is '
745 745 b'explicitly disabled in the local configuration)\n'
746 746 )
747 747 )
748 748 else:
749 749 ui.status(
750 750 _(
751 751 b'(remote is using large file support (lfs); lfs will '
752 752 b'be enabled for this repository)\n'
753 753 )
754 754 )
755 755
756 756 shareopts = shareopts or {}
757 757 sharepool = shareopts.get(b'pool')
758 758 sharenamemode = shareopts.get(b'mode')
759 759 if sharepool and islocal(dest):
760 760 sharepath = None
761 761 if sharenamemode == b'identity':
762 762 # Resolve the name from the initial changeset in the remote
763 763 # repository. This returns nullid when the remote is empty. It
764 764 # raises RepoLookupError if revision 0 is filtered or otherwise
765 765 # not available. If we fail to resolve, sharing is not enabled.
766 766 try:
767 767 with srcpeer.commandexecutor() as e:
768 768 rootnode = e.callcommand(
769 769 b'lookup',
770 770 {
771 771 b'key': b'0',
772 772 },
773 773 ).result()
774 774
775 775 if rootnode != nullid:
776 776 sharepath = os.path.join(sharepool, hex(rootnode))
777 777 else:
778 778 ui.status(
779 779 _(
780 780 b'(not using pooled storage: '
781 781 b'remote appears to be empty)\n'
782 782 )
783 783 )
784 784 except error.RepoLookupError:
785 785 ui.status(
786 786 _(
787 787 b'(not using pooled storage: '
788 788 b'unable to resolve identity of remote)\n'
789 789 )
790 790 )
791 791 elif sharenamemode == b'remote':
792 792 sharepath = os.path.join(
793 793 sharepool, hex(hashutil.sha1(source).digest())
794 794 )
795 795 else:
796 796 raise error.Abort(
797 797 _(b'unknown share naming mode: %s') % sharenamemode
798 798 )
799 799
800 800 # TODO this is a somewhat arbitrary restriction.
801 801 if narrow:
802 802 ui.status(
803 803 _(b'(pooled storage not supported for narrow clones)\n')
804 804 )
805 805 sharepath = None
806 806
807 807 if sharepath:
808 808 return clonewithshare(
809 809 ui,
810 810 peeropts,
811 811 sharepath,
812 812 source,
813 813 srcpeer,
814 814 dest,
815 815 pull=pull,
816 816 rev=revs,
817 817 update=update,
818 818 stream=stream,
819 819 )
820 820
821 821 srcrepo = srcpeer.local()
822 822
823 823 abspath = origsource
824 824 if islocal(origsource):
825 825 abspath = os.path.abspath(urlutil.urllocalpath(origsource))
826 826
827 827 if islocal(dest):
828 828 cleandir = dest
829 829
830 830 copy = False
831 831 if (
832 832 srcrepo
833 833 and srcrepo.cancopy()
834 834 and islocal(dest)
835 835 and not phases.hassecret(srcrepo)
836 836 ):
837 837 copy = not pull and not revs
838 838
839 839 # TODO this is a somewhat arbitrary restriction.
840 840 if narrow:
841 841 copy = False
842 842
843 843 if copy:
844 844 try:
845 845 # we use a lock here because if we race with commit, we
846 846 # can end up with extra data in the cloned revlogs that's
847 847 # not pointed to by changesets, thus causing verify to
848 848 # fail
849 849 srclock = srcrepo.lock(wait=False)
850 850 except error.LockError:
851 851 copy = False
852 852
853 853 if copy:
854 854 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
855 855 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
856 856 if not os.path.exists(dest):
857 857 util.makedirs(dest)
858 858 else:
859 859 # only clean up directories we create ourselves
860 860 cleandir = hgdir
861 861 try:
862 862 destpath = hgdir
863 863 util.makedir(destpath, notindexed=True)
864 864 except OSError as inst:
865 865 if inst.errno == errno.EEXIST:
866 866 cleandir = None
867 867 raise error.Abort(
868 868 _(b"destination '%s' already exists") % dest
869 869 )
870 870 raise
871 871
872 872 destlock = copystore(ui, srcrepo, destpath)
873 873 # copy bookmarks over
874 874 srcbookmarks = srcrepo.vfs.join(b'bookmarks')
875 875 dstbookmarks = os.path.join(destpath, b'bookmarks')
876 876 if os.path.exists(srcbookmarks):
877 877 util.copyfile(srcbookmarks, dstbookmarks)
878 878
879 879 dstcachedir = os.path.join(destpath, b'cache')
880 880 for cache in cacheutil.cachetocopy(srcrepo):
881 881 _copycache(srcrepo, dstcachedir, cache)
882 882
883 883 # we need to re-init the repo after manually copying the data
884 884 # into it
885 885 destpeer = peer(srcrepo, peeropts, dest)
886 886 srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
887 887 else:
888 888 try:
889 889 # only pass ui when no srcrepo
890 890 destpeer = peer(
891 891 srcrepo or ui,
892 892 peeropts,
893 893 dest,
894 894 create=True,
895 895 createopts=createopts,
896 896 )
897 897 except OSError as inst:
898 898 if inst.errno == errno.EEXIST:
899 899 cleandir = None
900 900 raise error.Abort(
901 901 _(b"destination '%s' already exists") % dest
902 902 )
903 903 raise
904 904
905 905 if revs:
906 906 if not srcpeer.capable(b'lookup'):
907 907 raise error.Abort(
908 908 _(
909 909 b"src repository does not support "
910 910 b"revision lookup and so doesn't "
911 911 b"support clone by revision"
912 912 )
913 913 )
914 914
915 915 # TODO this is batchable.
916 916 remoterevs = []
917 917 for rev in revs:
918 918 with srcpeer.commandexecutor() as e:
919 919 remoterevs.append(
920 920 e.callcommand(
921 921 b'lookup',
922 922 {
923 923 b'key': rev,
924 924 },
925 925 ).result()
926 926 )
927 927 revs = remoterevs
928 928
929 929 checkout = revs[0]
930 930 else:
931 931 revs = None
932 932 local = destpeer.local()
933 933 if local:
934 934 if narrow:
935 935 with local.wlock(), local.lock():
936 936 local.setnarrowpats(storeincludepats, storeexcludepats)
937 937 narrowspec.copytoworkingcopy(local)
938 938
939 939 u = urlutil.url(abspath)
940 940 defaulturl = bytes(u)
941 941 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
942 942 if not stream:
943 943 if pull:
944 944 stream = False
945 945 else:
946 946 stream = None
947 947 # internal config: ui.quietbookmarkmove
948 948 overrides = {(b'ui', b'quietbookmarkmove'): True}
949 949 with local.ui.configoverride(overrides, b'clone'):
950 950 exchange.pull(
951 951 local,
952 952 srcpeer,
953 953 revs,
954 954 streamclonerequested=stream,
955 955 includepats=storeincludepats,
956 956 excludepats=storeexcludepats,
957 957 depth=depth,
958 958 )
959 959 elif srcrepo:
960 960 # TODO lift restriction once exchange.push() accepts narrow
961 961 # push.
962 962 if narrow:
963 963 raise error.Abort(
964 964 _(
965 965 b'narrow clone not available for '
966 966 b'remote destinations'
967 967 )
968 968 )
969 969
970 970 exchange.push(
971 971 srcrepo,
972 972 destpeer,
973 973 revs=revs,
974 974 bookmarks=srcrepo._bookmarks.keys(),
975 975 )
976 976 else:
977 977 raise error.Abort(
978 978 _(b"clone from remote to remote not supported")
979 979 )
980 980
981 981 cleandir = None
982 982
983 983 destrepo = destpeer.local()
984 984 if destrepo:
985 985 template = uimod.samplehgrcs[b'cloned']
986 986 u = urlutil.url(abspath)
987 987 u.passwd = None
988 988 defaulturl = bytes(u)
989 989 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
990 990 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
991 991
992 992 if ui.configbool(b'experimental', b'remotenames'):
993 993 logexchange.pullremotenames(destrepo, srcpeer)
994 994
995 995 if update:
996 996 if update is not True:
997 997 with srcpeer.commandexecutor() as e:
998 998 checkout = e.callcommand(
999 999 b'lookup',
1000 1000 {
1001 1001 b'key': update,
1002 1002 },
1003 1003 ).result()
1004 1004
1005 1005 uprev = None
1006 1006 status = None
1007 1007 if checkout is not None:
1008 1008 # Some extensions (at least hg-git and hg-subversion) have
1009 1009 # a peer.lookup() implementation that returns a name instead
1010 1010 # of a nodeid. We work around it here until we've figured
1011 1011 # out a better solution.
1012 1012 if len(checkout) == 20 and checkout in destrepo:
1013 1013 uprev = checkout
1014 1014 elif scmutil.isrevsymbol(destrepo, checkout):
1015 1015 uprev = scmutil.revsymbol(destrepo, checkout).node()
1016 1016 else:
1017 1017 if update is not True:
1018 1018 try:
1019 1019 uprev = destrepo.lookup(update)
1020 1020 except error.RepoLookupError:
1021 1021 pass
1022 1022 if uprev is None:
1023 1023 try:
1024 1024 if destrepo._activebookmark:
1025 1025 uprev = destrepo.lookup(destrepo._activebookmark)
1026 1026 update = destrepo._activebookmark
1027 1027 else:
1028 1028 uprev = destrepo._bookmarks[b'@']
1029 1029 update = b'@'
1030 1030 bn = destrepo[uprev].branch()
1031 1031 if bn == b'default':
1032 1032 status = _(b"updating to bookmark %s\n" % update)
1033 1033 else:
1034 1034 status = (
1035 1035 _(b"updating to bookmark %s on branch %s\n")
1036 1036 ) % (update, bn)
1037 1037 except KeyError:
1038 1038 try:
1039 1039 uprev = destrepo.branchtip(b'default')
1040 1040 except error.RepoLookupError:
1041 1041 uprev = destrepo.lookup(b'tip')
1042 1042 if not status:
1043 1043 bn = destrepo[uprev].branch()
1044 1044 status = _(b"updating to branch %s\n") % bn
1045 1045 destrepo.ui.status(status)
1046 1046 _update(destrepo, uprev)
1047 1047 if update in destrepo._bookmarks:
1048 1048 bookmarks.activate(destrepo, update)
1049 1049 if destlock is not None:
1050 1050 release(destlock)
1051 1051 # here is a tiny window where someone could end up writing to the
1052 1052 # repository before the caches are sure to be warm. This is "fine"
1053 1053 # as the only "bad" outcome would be some slowness. That potential
1054 1054 # slowness already affects readers.
1055 1055 with destrepo.lock():
1056 destrepo.updatecaches(full=True)
1056 destrepo.updatecaches(full=b"post-clone")
1057 1057 finally:
1058 1058 release(srclock, destlock)
1059 1059 if cleandir is not None:
1060 1060 shutil.rmtree(cleandir, True)
1061 1061 if srcpeer is not None:
1062 1062 srcpeer.close()
1063 1063 if destpeer and destpeer.local() is None:
1064 1064 destpeer.close()
1065 1065 return srcpeer, destpeer
1066 1066
1067 1067
1068 1068 def _showstats(repo, stats, quietempty=False):
1069 1069 if quietempty and stats.isempty():
1070 1070 return
1071 1071 repo.ui.status(
1072 1072 _(
1073 1073 b"%d files updated, %d files merged, "
1074 1074 b"%d files removed, %d files unresolved\n"
1075 1075 )
1076 1076 % (
1077 1077 stats.updatedcount,
1078 1078 stats.mergedcount,
1079 1079 stats.removedcount,
1080 1080 stats.unresolvedcount,
1081 1081 )
1082 1082 )
1083 1083
1084 1084
1085 1085 def updaterepo(repo, node, overwrite, updatecheck=None):
1086 1086 """Update the working directory to node.
1087 1087
1088 1088 When overwrite is set, changes are clobbered; otherwise they are merged.
1089 1089
1090 1090 returns stats (see pydoc mercurial.merge.applyupdates)"""
1091 1091 repo.ui.deprecwarn(
1092 1092 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1093 1093 b'5.7',
1094 1094 )
1095 1095 return mergemod._update(
1096 1096 repo,
1097 1097 node,
1098 1098 branchmerge=False,
1099 1099 force=overwrite,
1100 1100 labels=[b'working copy', b'destination'],
1101 1101 updatecheck=updatecheck,
1102 1102 )
1103 1103
1104 1104
1105 1105 def update(repo, node, quietempty=False, updatecheck=None):
1106 1106 """update the working directory to node"""
1107 1107 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1108 1108 _showstats(repo, stats, quietempty)
1109 1109 if stats.unresolvedcount:
1110 1110 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1111 1111 return stats.unresolvedcount > 0
1112 1112
1113 1113
1114 1114 # naming conflict in clone()
1115 1115 _update = update
1116 1116
1117 1117
1118 1118 def clean(repo, node, show_stats=True, quietempty=False):
1119 1119 """forcibly switch the working directory to node, clobbering changes"""
1120 1120 stats = mergemod.clean_update(repo[node])
1121 1121 assert stats.unresolvedcount == 0
1122 1122 if show_stats:
1123 1123 _showstats(repo, stats, quietempty)
1124 1124 return False
1125 1125
1126 1126
1127 1127 # naming conflict in updatetotally()
1128 1128 _clean = clean
1129 1129
1130 1130 _VALID_UPDATECHECKS = {
1131 1131 mergemod.UPDATECHECK_ABORT,
1132 1132 mergemod.UPDATECHECK_NONE,
1133 1133 mergemod.UPDATECHECK_LINEAR,
1134 1134 mergemod.UPDATECHECK_NO_CONFLICT,
1135 1135 }
1136 1136
1137 1137
1138 1138 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1139 1139 """Update the working directory with extra care for non-file components
1140 1140
1141 1141 This takes care of non-file components below:
1142 1142
1143 1143 :bookmark: might be advanced or (in)activated
1144 1144
1145 1145 This takes arguments below:
1146 1146
1147 1147 :checkout: to which revision the working directory is updated
1148 1148 :brev: a name, which might be a bookmark to be activated after updating
1149 1149 :clean: whether changes in the working directory can be discarded
1150 1150 :updatecheck: how to deal with a dirty working directory
1151 1151
1152 1152 Valid values for updatecheck are the UPDATECHECK_* constants
1153 1153 defined in the merge module. Passing `None` will result in using the
1154 1154 configured default.
1155 1155
1156 1156 * ABORT: abort if the working directory is dirty
1157 1157 * NONE: don't check (merge working directory changes into destination)
1158 1158 * LINEAR: check that update is linear before merging working directory
1159 1159 changes into destination
1160 1160 * NO_CONFLICT: check that the update does not result in file merges
1161 1161
1162 1162 This returns whether conflict is detected at updating or not.
1163 1163 """
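# Illustrative call (the arguments shown are hypothetical): update to the
# configured destination while refusing any update that would create file
# merges:
#
#   updatetotally(ui, repo, checkout=None, brev=None,
#                 updatecheck=mergemod.UPDATECHECK_NO_CONFLICT)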
1164 1164 if updatecheck is None:
1165 1165 updatecheck = ui.config(b'commands', b'update.check')
1166 1166 if updatecheck not in _VALID_UPDATECHECKS:
1167 1167 # If not configured, or invalid value configured
1168 1168 updatecheck = mergemod.UPDATECHECK_LINEAR
1169 1169 if updatecheck not in _VALID_UPDATECHECKS:
1170 1170 raise ValueError(
1171 1171 r'Invalid updatecheck value %r (can accept %r)'
1172 1172 % (updatecheck, _VALID_UPDATECHECKS)
1173 1173 )
1174 1174 with repo.wlock():
1175 1175 movemarkfrom = None
1176 1176 warndest = False
1177 1177 if checkout is None:
1178 1178 updata = destutil.destupdate(repo, clean=clean)
1179 1179 checkout, movemarkfrom, brev = updata
1180 1180 warndest = True
1181 1181
1182 1182 if clean:
1183 1183 ret = _clean(repo, checkout)
1184 1184 else:
1185 1185 if updatecheck == mergemod.UPDATECHECK_ABORT:
1186 1186 cmdutil.bailifchanged(repo, merge=False)
1187 1187 updatecheck = mergemod.UPDATECHECK_NONE
1188 1188 ret = _update(repo, checkout, updatecheck=updatecheck)
1189 1189
1190 1190 if not ret and movemarkfrom:
1191 1191 if movemarkfrom == repo[b'.'].node():
1192 1192 pass # no-op update
1193 1193 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1194 1194 b = ui.label(repo._activebookmark, b'bookmarks.active')
1195 1195 ui.status(_(b"updating bookmark %s\n") % b)
1196 1196 else:
1197 1197 # this can happen with a non-linear update
1198 1198 b = ui.label(repo._activebookmark, b'bookmarks')
1199 1199 ui.status(_(b"(leaving bookmark %s)\n") % b)
1200 1200 bookmarks.deactivate(repo)
1201 1201 elif brev in repo._bookmarks:
1202 1202 if brev != repo._activebookmark:
1203 1203 b = ui.label(brev, b'bookmarks.active')
1204 1204 ui.status(_(b"(activating bookmark %s)\n") % b)
1205 1205 bookmarks.activate(repo, brev)
1206 1206 elif brev:
1207 1207 if repo._activebookmark:
1208 1208 b = ui.label(repo._activebookmark, b'bookmarks')
1209 1209 ui.status(_(b"(leaving bookmark %s)\n") % b)
1210 1210 bookmarks.deactivate(repo)
1211 1211
1212 1212 if warndest:
1213 1213 destutil.statusotherdests(ui, repo)
1214 1214
1215 1215 return ret
1216 1216
1217 1217
1218 1218 def merge(
1219 1219 ctx,
1220 1220 force=False,
1221 1221 remind=True,
1222 1222 labels=None,
1223 1223 ):
1224 1224 """Branch merge with node, resolving changes. Return true if any
1225 1225 unresolved conflicts."""
1226 1226 repo = ctx.repo()
1227 1227 stats = mergemod.merge(ctx, force=force, labels=labels)
1228 1228 _showstats(repo, stats)
1229 1229 if stats.unresolvedcount:
1230 1230 repo.ui.status(
1231 1231 _(
1232 1232 b"use 'hg resolve' to retry unresolved file merges "
1233 1233 b"or 'hg merge --abort' to abandon\n"
1234 1234 )
1235 1235 )
1236 1236 elif remind:
1237 1237 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1238 1238 return stats.unresolvedcount > 0
1239 1239
1240 1240
1241 1241 def abortmerge(ui, repo):
1242 1242 ms = mergestatemod.mergestate.read(repo)
1243 1243 if ms.active():
1244 1244 # there were conflicts
1245 1245 node = ms.localctx.hex()
1246 1246 else:
1247 1247 # there were no conflicts, mergestate was not stored
1248 1248 node = repo[b'.'].hex()
1249 1249
1250 1250 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1251 1251 stats = mergemod.clean_update(repo[node])
1252 1252 assert stats.unresolvedcount == 0
1253 1253 _showstats(repo, stats)
1254 1254
1255 1255
1256 1256 def _incoming(
1257 1257 displaychlist,
1258 1258 subreporecurse,
1259 1259 ui,
1260 1260 repo,
1261 1261 source,
1262 1262 opts,
1263 1263 buffered=False,
1264 1264 subpath=None,
1265 1265 ):
1266 1266 """
1267 1267 Helper for incoming / gincoming.
1268 1268 displaychlist gets called with
1269 1269 (remoterepo, incomingchangesetlist, displayer) parameters,
1270 1270 and is supposed to contain only code that can't be unified.
1271 1271 """
1272 1272 srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
1273 1273 srcs = list(srcs)
1274 1274 if len(srcs) != 1:
1275 1275 msg = _(b'for now, incoming supports only a single source, %d provided')
1276 1276 msg %= len(srcs)
1277 1277 raise error.Abort(msg)
1278 1278 source, branches = srcs[0]
1279 1279 if subpath is not None:
1280 1280 subpath = urlutil.url(subpath)
1281 1281 if subpath.isabs():
1282 1282 source = bytes(subpath)
1283 1283 else:
1284 1284 p = urlutil.url(source)
1285 1285 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1286 1286 source = bytes(p)
1287 1287 other = peer(repo, opts, source)
1288 1288 cleanupfn = other.close
1289 1289 try:
1290 1290 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1291 1291 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1292 1292
1293 1293 if revs:
1294 1294 revs = [other.lookup(rev) for rev in revs]
1295 1295 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1296 1296 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1297 1297 )
1298 1298
1299 1299 if not chlist:
1300 1300 ui.status(_(b"no changes found\n"))
1301 1301 return subreporecurse()
1302 1302 ui.pager(b'incoming')
1303 1303 displayer = logcmdutil.changesetdisplayer(
1304 1304 ui, other, opts, buffered=buffered
1305 1305 )
1306 1306 displaychlist(other, chlist, displayer)
1307 1307 displayer.close()
1308 1308 finally:
1309 1309 cleanupfn()
1310 1310 subreporecurse()
1311 1311 return 0 # exit code is zero since we found incoming changes
1312 1312
1313 1313
1314 1314 def incoming(ui, repo, source, opts, subpath=None):
1315 1315 def subreporecurse():
1316 1316 ret = 1
1317 1317 if opts.get(b'subrepos'):
1318 1318 ctx = repo[None]
1319 1319 for subpath in sorted(ctx.substate):
1320 1320 sub = ctx.sub(subpath)
1321 1321 ret = min(ret, sub.incoming(ui, source, opts))
1322 1322 return ret
1323 1323
1324 1324 def display(other, chlist, displayer):
1325 1325 limit = logcmdutil.getlimit(opts)
1326 1326 if opts.get(b'newest_first'):
1327 1327 chlist.reverse()
1328 1328 count = 0
1329 1329 for n in chlist:
1330 1330 if limit is not None and count >= limit:
1331 1331 break
1332 1332 parents = [p for p in other.changelog.parents(n) if p != nullid]
1333 1333 if opts.get(b'no_merges') and len(parents) == 2:
1334 1334 continue
1335 1335 count += 1
1336 1336 displayer.show(other[n])
1337 1337
1338 1338 return _incoming(
1339 1339 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1340 1340 )
1341 1341
1342 1342
1343 1343 def _outgoing(ui, repo, dests, opts, subpath=None):
1344 1344 out = set()
1345 1345 others = []
1346 1346 for path in urlutil.get_push_paths(repo, ui, dests):
1347 1347 dest = path.pushloc or path.loc
1348 1348 if subpath is not None:
1349 1349 subpath = urlutil.url(subpath)
1350 1350 if subpath.isabs():
1351 1351 dest = bytes(subpath)
1352 1352 else:
1353 1353 p = urlutil.url(dest)
1354 1354 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1355 1355 dest = bytes(p)
1356 1356 branches = path.branch, opts.get(b'branch') or []
1357 1357
1358 1358 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1359 1359 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1360 1360 if revs:
1361 1361 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1362 1362
1363 1363 other = peer(repo, opts, dest)
1364 1364 try:
1365 1365 outgoing = discovery.findcommonoutgoing(
1366 1366 repo, other, revs, force=opts.get(b'force')
1367 1367 )
1368 1368 o = outgoing.missing
1369 1369 out.update(o)
1370 1370 if not o:
1371 1371 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1372 1372 others.append(other)
1373 1373 except: # re-raises
1374 1374 other.close()
1375 1375 raise
1376 1376 # make sure this is ordered by revision number
1377 1377 outgoing_revs = list(out)
1378 1378 cl = repo.changelog
1379 1379 outgoing_revs.sort(key=cl.rev)
1380 1380 return outgoing_revs, others
1381 1381
1382 1382
1383 1383 def _outgoing_recurse(ui, repo, dests, opts):
1384 1384 ret = 1
1385 1385 if opts.get(b'subrepos'):
1386 1386 ctx = repo[None]
1387 1387 for subpath in sorted(ctx.substate):
1388 1388 sub = ctx.sub(subpath)
1389 1389 ret = min(ret, sub.outgoing(ui, dests, opts))
1390 1390 return ret
1391 1391
1392 1392
1393 1393 def _outgoing_filter(repo, revs, opts):
1394 1394 """apply revision filtering/ordering option for outgoing"""
1395 1395 limit = logcmdutil.getlimit(opts)
1396 1396 no_merges = opts.get(b'no_merges')
1397 1397 if opts.get(b'newest_first'):
1398 1398 revs.reverse()
1399 1399 if limit is None and not no_merges:
1400 1400 for r in revs:
1401 1401 yield r
1402 1402 return
1403 1403
1404 1404 count = 0
1405 1405 cl = repo.changelog
1406 1406 for n in revs:
1407 1407 if limit is not None and count >= limit:
1408 1408 break
1409 1409 parents = [p for p in cl.parents(n) if p != nullid]
1410 1410 if no_merges and len(parents) == 2:
1411 1411 continue
1412 1412 count += 1
1413 1413 yield n
1414 1414
1415 1415
1416 1416 def outgoing(ui, repo, dests, opts, subpath=None):
1417 1417 if opts.get(b'graph'):
1418 1418 logcmdutil.checkunsupportedgraphflags([], opts)
1419 1419 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1420 1420 ret = 1
1421 1421 try:
1422 1422 if o:
1423 1423 ret = 0
1424 1424
1425 1425 if opts.get(b'graph'):
1426 1426 revdag = logcmdutil.graphrevs(repo, o, opts)
1427 1427 ui.pager(b'outgoing')
1428 1428 displayer = logcmdutil.changesetdisplayer(
1429 1429 ui, repo, opts, buffered=True
1430 1430 )
1431 1431 logcmdutil.displaygraph(
1432 1432 ui, repo, revdag, displayer, graphmod.asciiedges
1433 1433 )
1434 1434 else:
1435 1435 ui.pager(b'outgoing')
1436 1436 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1437 1437 for n in _outgoing_filter(repo, o, opts):
1438 1438 displayer.show(repo[n])
1439 1439 displayer.close()
1440 1440 for oth in others:
1441 1441 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1442 1442 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1443 1443 return ret # exit code is zero if we found outgoing changes
1444 1444 finally:
1445 1445 for oth in others:
1446 1446 oth.close()
1447 1447
1448 1448
1449 1449 def verify(repo, level=None):
1450 1450 """verify the consistency of a repository"""
1451 1451 ret = verifymod.verify(repo, level=level)
1452 1452
1453 1453 # Broken subrepo references in hidden csets don't seem worth worrying about,
1454 1454 # since they can't be pushed/pulled, and --hidden can be used if they are a
1455 1455 # concern.
1456 1456
1457 1457 # pathto() is needed for -R case
1458 1458 revs = repo.revs(
1459 1459 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1460 1460 )
1461 1461
1462 1462 if revs:
1463 1463 repo.ui.status(_(b'checking subrepo links\n'))
1464 1464 for rev in revs:
1465 1465 ctx = repo[rev]
1466 1466 try:
1467 1467 for subpath in ctx.substate:
1468 1468 try:
1469 1469 ret = (
1470 1470 ctx.sub(subpath, allowcreate=False).verify() or ret
1471 1471 )
1472 1472 except error.RepoError as e:
1473 1473 repo.ui.warn(b'%d: %s\n' % (rev, e))
1474 1474 except Exception:
1475 1475 repo.ui.warn(
1476 1476 _(b'.hgsubstate is corrupt in revision %s\n')
1477 1477 % short(ctx.node())
1478 1478 )
1479 1479
1480 1480 return ret
1481 1481
1482 1482
1483 1483 def remoteui(src, opts):
1484 1484 """build a remote ui from ui or repo and opts"""
1485 1485 if util.safehasattr(src, b'baseui'): # looks like a repository
1486 1486 dst = src.baseui.copy() # drop repo-specific config
1487 1487 src = src.ui # copy target options from repo
1488 1488 else: # assume it's a global ui object
1489 1489 dst = src.copy() # keep all global options
1490 1490
1491 1491 # copy ssh-specific options
1492 1492 for o in b'ssh', b'remotecmd':
1493 1493 v = opts.get(o) or src.config(b'ui', o)
1494 1494 if v:
1495 1495 dst.setconfig(b"ui", o, v, b'copied')
1496 1496
1497 1497 # copy bundle-specific options
1498 1498 r = src.config(b'bundle', b'mainreporoot')
1499 1499 if r:
1500 1500 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1501 1501
1502 1502 # copy selected local settings to the remote ui
1503 1503 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1504 1504 for key, val in src.configitems(sect):
1505 1505 dst.setconfig(sect, key, val, b'copied')
1506 1506 v = src.config(b'web', b'cacerts')
1507 1507 if v:
1508 1508 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1509 1509
1510 1510 return dst
1511 1511
1512 1512
1513 1513 # Files of interest
1514 1514 # Used to check if the repository has changed, by looking at the mtime and
1515 1515 # size of these files.
1516 1516 foi = [
1517 1517 (b'spath', b'00changelog.i'),
1518 1518 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1519 1519 (b'spath', b'obsstore'),
1520 1520 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1521 1521 ]
1522 1522
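# Illustrative: for each (attr, fname) pair above, cachedlocalrepo._repostate
# (below) stats the file under getattr(repo, attr) and records an
# (mtime, size) tuple; any change in those tuples makes fetch() rebuild the
# cached repository object.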
1523 1523
1524 1524 class cachedlocalrepo(object):
1525 1525 """Holds a localrepository that can be cached and reused."""
1526 1526
1527 1527 def __init__(self, repo):
1528 1528 """Create a new cached repo from an existing repo.
1529 1529
1530 1530 We assume the passed in repo was recently created. If the
1531 1531 repo has changed between when it was created and when it was
1532 1532 turned into a cache, it may not refresh properly.
1533 1533 """
1534 1534 assert isinstance(repo, localrepo.localrepository)
1535 1535 self._repo = repo
1536 1536 self._state, self.mtime = self._repostate()
1537 1537 self._filtername = repo.filtername
1538 1538
1539 1539 def fetch(self):
1540 1540 """Refresh (if necessary) and return a repository.
1541 1541
1542 1542 If the cached instance is out of date, it will be recreated
1543 1543 automatically and returned.
1544 1544
1545 1545 Returns a tuple of the repo and a boolean indicating whether a new
1546 1546 repo instance was created.
1547 1547 """
1548 1548 # We compare the mtimes and sizes of some well-known files to
1549 1549 # determine if the repo changed. This is not precise, as mtimes
1550 1550 # are susceptible to clock skew and imprecise filesystems and
1551 1551 # file content can change while maintaining the same size.
1552 1552
1553 1553 state, mtime = self._repostate()
1554 1554 if state == self._state:
1555 1555 return self._repo, False
1556 1556
1557 1557 repo = repository(self._repo.baseui, self._repo.url())
1558 1558 if self._filtername:
1559 1559 self._repo = repo.filtered(self._filtername)
1560 1560 else:
1561 1561 self._repo = repo.unfiltered()
1562 1562 self._state = state
1563 1563 self.mtime = mtime
1564 1564
1565 1565 return self._repo, True
1566 1566
1567 1567 def _repostate(self):
1568 1568 state = []
1569 1569 maxmtime = -1
1570 1570 for attr, fname in foi:
1571 1571 prefix = getattr(self._repo, attr)
1572 1572 p = os.path.join(prefix, fname)
1573 1573 try:
1574 1574 st = os.stat(p)
1575 1575 except OSError:
1576 1576 st = os.stat(prefix)
1577 1577 state.append((st[stat.ST_MTIME], st.st_size))
1578 1578 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1579 1579
1580 1580 return tuple(state), maxmtime
1581 1581
1582 1582 def copy(self):
1583 1583 """Obtain a copy of this class instance.
1584 1584
1585 1585 A new localrepository instance is obtained. The new instance should be
1586 1586 completely independent of the original.
1587 1587 """
1588 1588 repo = repository(self._repo.baseui, self._repo.origroot)
1589 1589 if self._filtername:
1590 1590 repo = repo.filtered(self._filtername)
1591 1591 else:
1592 1592 repo = repo.unfiltered()
1593 1593 c = cachedlocalrepo(repo)
1594 1594 c._state = self._state
1595 1595 c.mtime = self.mtime
1596 1596 return c
mercurial/localrepo.py
@@ -1,3759 +1,3765 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 sha1nodeconstants,
25 25 short,
26 26 )
27 27 from .pycompat import (
28 28 delattr,
29 29 getattr,
30 30 )
31 31 from . import (
32 32 bookmarks,
33 33 branchmap,
34 34 bundle2,
35 35 bundlecaches,
36 36 changegroup,
37 37 color,
38 38 commit,
39 39 context,
40 40 dirstate,
41 41 dirstateguard,
42 42 discovery,
43 43 encoding,
44 44 error,
45 45 exchange,
46 46 extensions,
47 47 filelog,
48 48 hook,
49 49 lock as lockmod,
50 50 match as matchmod,
51 51 mergestate as mergestatemod,
52 52 mergeutil,
53 53 metadata as metadatamod,
54 54 namespaces,
55 55 narrowspec,
56 56 obsolete,
57 57 pathutil,
58 58 phases,
59 59 pushkey,
60 60 pycompat,
61 61 rcutil,
62 62 repoview,
63 63 requirements as requirementsmod,
64 64 revlog,
65 65 revset,
66 66 revsetlang,
67 67 scmutil,
68 68 sparse,
69 69 store as storemod,
70 70 subrepoutil,
71 71 tags as tagsmod,
72 72 transaction,
73 73 txnutil,
74 74 util,
75 75 vfs as vfsmod,
76 76 wireprototypes,
77 77 )
78 78
79 79 from .interfaces import (
80 80 repository,
81 81 util as interfaceutil,
82 82 )
83 83
84 84 from .utils import (
85 85 hashutil,
86 86 procutil,
87 87 stringutil,
88 88 urlutil,
89 89 )
90 90
91 91 from .revlogutils import (
92 92 concurrency_checker as revlogchecker,
93 93 constants as revlogconst,
94 94 )
95 95
96 96 release = lockmod.release
97 97 urlerr = util.urlerr
98 98 urlreq = util.urlreq
99 99
100 100 # set of (path, vfs-location) tuples. vfs-location is:
101 101 # - 'plain' for vfs relative paths
102 102 # - '' for svfs relative paths
103 103 _cachedfiles = set()
104 104
105 105
106 106 class _basefilecache(scmutil.filecache):
107 107 """All filecache usage on repo are done for logic that should be unfiltered"""
108 108
109 109 def __get__(self, repo, type=None):
110 110 if repo is None:
111 111 return self
112 112 # proxy to unfiltered __dict__ since filtered repo has no entry
113 113 unfi = repo.unfiltered()
114 114 try:
115 115 return unfi.__dict__[self.sname]
116 116 except KeyError:
117 117 pass
118 118 return super(_basefilecache, self).__get__(unfi, type)
119 119
120 120 def set(self, repo, value):
121 121 return super(_basefilecache, self).set(repo.unfiltered(), value)
122 122
123 123
124 124 class repofilecache(_basefilecache):
125 125 """filecache for files in .hg but outside of .hg/store"""
126 126
127 127 def __init__(self, *paths):
128 128 super(repofilecache, self).__init__(*paths)
129 129 for path in paths:
130 130 _cachedfiles.add((path, b'plain'))
131 131
132 132 def join(self, obj, fname):
133 133 return obj.vfs.join(fname)
134 134
135 135
136 136 class storecache(_basefilecache):
137 137 """filecache for files in the store"""
138 138
139 139 def __init__(self, *paths):
140 140 super(storecache, self).__init__(*paths)
141 141 for path in paths:
142 142 _cachedfiles.add((path, b''))
143 143
144 144 def join(self, obj, fname):
145 145 return obj.sjoin(fname)
146 146
147 147
148 148 class mixedrepostorecache(_basefilecache):
149 149 """filecache for a mix files in .hg/store and outside"""
150 150
151 151 def __init__(self, *pathsandlocations):
152 152 # scmutil.filecache only uses the path for passing back into our
153 153 # join(), so we can safely pass a list of paths and locations
154 154 super(mixedrepostorecache, self).__init__(*pathsandlocations)
155 155 _cachedfiles.update(pathsandlocations)
156 156
157 157 def join(self, obj, fnameandlocation):
158 158 fname, location = fnameandlocation
159 159 if location == b'plain':
160 160 return obj.vfs.join(fname)
161 161 else:
162 162 if location != b'':
163 163 raise error.ProgrammingError(
164 164 b'unexpected location: %s' % location
165 165 )
166 166 return obj.sjoin(fname)
167 167
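# For reference, the three cache decorators above are used on repository
# properties later in this module, e.g.:
#
#     @repofilecache(b'dirstate')      # file tracked through repo.vfs
#     def dirstate(self):
#         ...
#
#     @storecache(b'00changelog.i')    # file tracked through repo.svfs
#     def changelog(self):
#         ...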
168 168
169 169 def isfilecached(repo, name):
170 170     """check if a repo already has the "name" filecache-ed property cached
171 171 
172 172     This returns a (cachedobj-or-None, iscached) tuple.
173 173 """
174 174 cacheentry = repo.unfiltered()._filecache.get(name, None)
175 175 if not cacheentry:
176 176 return None, False
177 177 return cacheentry.obj, True
178 178
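# Minimal usage sketch (hypothetical caller code; ``repo`` assumed in
# scope): probe the cache without triggering a load of the underlying file.
#
#     obj, cached = isfilecached(repo, 'dirstate')
#     if cached:
#         ...  # reuse ``obj`` without re-reading .hg/dirstate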
179 179
180 180 class unfilteredpropertycache(util.propertycache):
181 181     """propertycache that applies to the unfiltered repo only"""
182 182
183 183 def __get__(self, repo, type=None):
184 184 unfi = repo.unfiltered()
185 185 if unfi is repo:
186 186 return super(unfilteredpropertycache, self).__get__(unfi)
187 187 return getattr(unfi, self.name)
188 188
189 189
190 190 class filteredpropertycache(util.propertycache):
191 191     """propertycache that must take filtering into account"""
192 192
193 193 def cachevalue(self, obj, value):
194 194 object.__setattr__(obj, self.name, value)
195 195
196 196
197 197 def hasunfilteredcache(repo, name):
198 198 """check if a repo has an unfilteredpropertycache value for <name>"""
199 199 return name in vars(repo.unfiltered())
200 200
201 201
202 202 def unfilteredmethod(orig):
203 203     """decorate a method that always needs to be run on the unfiltered version"""
204 204
205 205 @functools.wraps(orig)
206 206 def wrapper(repo, *args, **kwargs):
207 207 return orig(repo.unfiltered(), *args, **kwargs)
208 208
209 209 return wrapper
210 210
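# Illustrative sketch (hypothetical extension-style code, not part of the
# original module): a method wrapped with ``unfilteredmethod`` always
# operates on the unfiltered repository, whatever repoview the caller holds.
#
#     class myrepomixin(object):
#         @unfilteredmethod
#         def _rebuildsomecache(self):
#             ...  # sees all revisions, even filtered ones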
211 211
212 212 moderncaps = {
213 213 b'lookup',
214 214 b'branchmap',
215 215 b'pushkey',
216 216 b'known',
217 217 b'getbundle',
218 218 b'unbundle',
219 219 }
220 220 legacycaps = moderncaps.union({b'changegroupsubset'})
221 221
222 222
223 223 @interfaceutil.implementer(repository.ipeercommandexecutor)
224 224 class localcommandexecutor(object):
225 225 def __init__(self, peer):
226 226 self._peer = peer
227 227 self._sent = False
228 228 self._closed = False
229 229
230 230 def __enter__(self):
231 231 return self
232 232
233 233 def __exit__(self, exctype, excvalue, exctb):
234 234 self.close()
235 235
236 236 def callcommand(self, command, args):
237 237 if self._sent:
238 238 raise error.ProgrammingError(
239 239 b'callcommand() cannot be used after sendcommands()'
240 240 )
241 241
242 242 if self._closed:
243 243 raise error.ProgrammingError(
244 244 b'callcommand() cannot be used after close()'
245 245 )
246 246
247 247 # We don't need to support anything fancy. Just call the named
248 248 # method on the peer and return a resolved future.
249 249 fn = getattr(self._peer, pycompat.sysstr(command))
250 250
251 251 f = pycompat.futures.Future()
252 252
253 253 try:
254 254 result = fn(**pycompat.strkwargs(args))
255 255 except Exception:
256 256 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
257 257 else:
258 258 f.set_result(result)
259 259
260 260 return f
261 261
262 262 def sendcommands(self):
263 263 self._sent = True
264 264
265 265 def close(self):
266 266 self._closed = True
267 267
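# Usage sketch (hypothetical caller code; ``peer`` and ``nodes`` assumed in
# scope) for the executor above: commands are requested through
# ``callcommand()`` and each call returns an already-resolved future.
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'known', {b'nodes': nodes})
#     f.result()  # raises here if the command itself raised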
268 268
269 269 @interfaceutil.implementer(repository.ipeercommands)
270 270 class localpeer(repository.peer):
271 271 '''peer for a local repo; reflects only the most recent API'''
272 272
273 273 def __init__(self, repo, caps=None):
274 274 super(localpeer, self).__init__()
275 275
276 276 if caps is None:
277 277 caps = moderncaps.copy()
278 278 self._repo = repo.filtered(b'served')
279 279 self.ui = repo.ui
280 280
281 281 if repo._wanted_sidedata:
282 282 formatted = bundle2.format_remote_wanted_sidedata(repo)
283 283 caps.add(b'exp-wanted-sidedata=' + formatted)
284 284
285 285 self._caps = repo._restrictcapabilities(caps)
286 286
287 287 # Begin of _basepeer interface.
288 288
289 289 def url(self):
290 290 return self._repo.url()
291 291
292 292 def local(self):
293 293 return self._repo
294 294
295 295 def peer(self):
296 296 return self
297 297
298 298 def canpush(self):
299 299 return True
300 300
301 301 def close(self):
302 302 self._repo.close()
303 303
304 304 # End of _basepeer interface.
305 305
306 306 # Begin of _basewirecommands interface.
307 307
308 308 def branchmap(self):
309 309 return self._repo.branchmap()
310 310
311 311 def capabilities(self):
312 312 return self._caps
313 313
314 314 def clonebundles(self):
315 315 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
316 316
317 317 def debugwireargs(self, one, two, three=None, four=None, five=None):
318 318 """Used to test argument passing over the wire"""
319 319 return b"%s %s %s %s %s" % (
320 320 one,
321 321 two,
322 322 pycompat.bytestr(three),
323 323 pycompat.bytestr(four),
324 324 pycompat.bytestr(five),
325 325 )
326 326
327 327 def getbundle(
328 328 self,
329 329 source,
330 330 heads=None,
331 331 common=None,
332 332 bundlecaps=None,
333 333 remote_sidedata=None,
334 334 **kwargs
335 335 ):
336 336 chunks = exchange.getbundlechunks(
337 337 self._repo,
338 338 source,
339 339 heads=heads,
340 340 common=common,
341 341 bundlecaps=bundlecaps,
342 342 remote_sidedata=remote_sidedata,
343 343 **kwargs
344 344 )[1]
345 345 cb = util.chunkbuffer(chunks)
346 346
347 347 if exchange.bundle2requested(bundlecaps):
348 348 # When requesting a bundle2, getbundle returns a stream to make the
349 349 # wire level function happier. We need to build a proper object
350 350 # from it in local peer.
351 351 return bundle2.getunbundler(self.ui, cb)
352 352 else:
353 353 return changegroup.getunbundler(b'01', cb, None)
354 354
355 355 def heads(self):
356 356 return self._repo.heads()
357 357
358 358 def known(self, nodes):
359 359 return self._repo.known(nodes)
360 360
361 361 def listkeys(self, namespace):
362 362 return self._repo.listkeys(namespace)
363 363
364 364 def lookup(self, key):
365 365 return self._repo.lookup(key)
366 366
367 367 def pushkey(self, namespace, key, old, new):
368 368 return self._repo.pushkey(namespace, key, old, new)
369 369
370 370 def stream_out(self):
371 371 raise error.Abort(_(b'cannot perform stream clone against local peer'))
372 372
373 373 def unbundle(self, bundle, heads, url):
374 374 """apply a bundle on a repo
375 375
376 376 This function handles the repo locking itself."""
377 377 try:
378 378 try:
379 379 bundle = exchange.readbundle(self.ui, bundle, None)
380 380 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
381 381 if util.safehasattr(ret, b'getchunks'):
382 382 # This is a bundle20 object, turn it into an unbundler.
383 383 # This little dance should be dropped eventually when the
384 384 # API is finally improved.
385 385 stream = util.chunkbuffer(ret.getchunks())
386 386 ret = bundle2.getunbundler(self.ui, stream)
387 387 return ret
388 388 except Exception as exc:
389 389 # If the exception contains output salvaged from a bundle2
390 390 # reply, we need to make sure it is printed before continuing
391 391 # to fail. So we build a bundle2 with such output and consume
392 392 # it directly.
393 393 #
394 394 # This is not very elegant but allows a "simple" solution for
395 395 # issue4594
396 396 output = getattr(exc, '_bundle2salvagedoutput', ())
397 397 if output:
398 398 bundler = bundle2.bundle20(self._repo.ui)
399 399 for out in output:
400 400 bundler.addpart(out)
401 401 stream = util.chunkbuffer(bundler.getchunks())
402 402 b = bundle2.getunbundler(self.ui, stream)
403 403 bundle2.processbundle(self._repo, b)
404 404 raise
405 405 except error.PushRaced as exc:
406 406 raise error.ResponseError(
407 407 _(b'push failed:'), stringutil.forcebytestr(exc)
408 408 )
409 409
410 410 # End of _basewirecommands interface.
411 411
412 412 # Begin of peer interface.
413 413
414 414 def commandexecutor(self):
415 415 return localcommandexecutor(self)
416 416
417 417 # End of peer interface.
418 418
419 419
420 420 @interfaceutil.implementer(repository.ipeerlegacycommands)
421 421 class locallegacypeer(localpeer):
422 422 """peer extension which implements legacy methods too; used for tests with
423 423 restricted capabilities"""
424 424
425 425 def __init__(self, repo):
426 426 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
427 427
428 428 # Begin of baselegacywirecommands interface.
429 429
430 430 def between(self, pairs):
431 431 return self._repo.between(pairs)
432 432
433 433 def branches(self, nodes):
434 434 return self._repo.branches(nodes)
435 435
436 436 def changegroup(self, nodes, source):
437 437 outgoing = discovery.outgoing(
438 438 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
439 439 )
440 440 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
441 441
442 442 def changegroupsubset(self, bases, heads, source):
443 443 outgoing = discovery.outgoing(
444 444 self._repo, missingroots=bases, ancestorsof=heads
445 445 )
446 446 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
447 447
448 448 # End of baselegacywirecommands interface.
449 449
450 450
451 451 # Functions receiving (ui, features) that extensions can register to impact
452 452 # the ability to load repositories with custom requirements. Only
453 453 # functions defined in loaded extensions are called.
454 454 #
455 455 # The function receives a set of requirement strings that the repository
456 456 # is capable of opening. Functions will typically add elements to the
457 457 # set to reflect that the extension knows how to handle those requirements.
458 458 featuresetupfuncs = set()
459 459
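# Hypothetical registration sketch (extension code; the requirement name is
# made up): an extension declaring that it knows how to open repositories
# carrying an extra requirement. Only functions defined in loaded
# extensions are actually called (see gathersupportedrequirements()).
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, features):
#         features.add(b'exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)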
460 460
461 461 def _getsharedvfs(hgvfs, requirements):
462 462     """returns the vfs object pointing to the root of the shared source
463 463     repo for a shared repository
464 464
465 465     hgvfs is the vfs pointing at .hg/ of the current repo (the shared one)
466 466     requirements is the set of requirements of the current repo (the shared one)
467 467 """
468 468 # The ``shared`` or ``relshared`` requirements indicate the
469 469 # store lives in the path contained in the ``.hg/sharedpath`` file.
470 470 # This is an absolute path for ``shared`` and relative to
471 471 # ``.hg/`` for ``relshared``.
472 472 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
473 473 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
474 474 sharedpath = util.normpath(hgvfs.join(sharedpath))
475 475
476 476 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
477 477
478 478 if not sharedvfs.exists():
479 479 raise error.RepoError(
480 480 _(b'.hg/sharedpath points to nonexistent directory %s')
481 481 % sharedvfs.base
482 482 )
483 483 return sharedvfs
484 484
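# Illustrative example (paths are made up): for a share whose source is
# /src/repo, ``.hg/sharedpath`` contains ``/src/repo/.hg`` (absolute for the
# ``shared`` requirement, relative to ``.hg/`` for ``relshared``), so:
#
#     sharedvfs = _getsharedvfs(hgvfs, requirements)
#     sharedvfs.join(b'store')  # -> b'/src/repo/.hg/store'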
485 485
486 486 def _readrequires(vfs, allowmissing):
487 487     """reads the requires file present at the root of this vfs
488 488     and returns a set of requirements
489 489
490 490 If allowmissing is True, we suppress ENOENT if raised"""
491 491 # requires file contains a newline-delimited list of
492 492 # features/capabilities the opener (us) must have in order to use
493 493 # the repository. This file was introduced in Mercurial 0.9.2,
494 494 # which means very old repositories may not have one. We assume
495 495 # a missing file translates to no requirements.
496 496 try:
497 497 requirements = set(vfs.read(b'requires').splitlines())
498 498 except IOError as e:
499 499 if not (allowmissing and e.errno == errno.ENOENT):
500 500 raise
501 501 requirements = set()
502 502 return requirements
503 503
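# Illustrative example (exact contents vary per repository): a typical
# modern ``.hg/requires`` might read
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     sparserevlog
#     store
#
# and ``_readrequires`` would return those lines as a set of bytestrings.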
504 504
505 505 def makelocalrepository(baseui, path, intents=None):
506 506 """Create a local repository object.
507 507
508 508 Given arguments needed to construct a local repository, this function
509 509 performs various early repository loading functionality (such as
510 510 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
511 511 the repository can be opened, derives a type suitable for representing
512 512 that repository, and returns an instance of it.
513 513
514 514 The returned object conforms to the ``repository.completelocalrepository``
515 515 interface.
516 516
517 517 The repository type is derived by calling a series of factory functions
518 518 for each aspect/interface of the final repository. These are defined by
519 519 ``REPO_INTERFACES``.
520 520
521 521 Each factory function is called to produce a type implementing a specific
522 522 interface. The cumulative list of returned types will be combined into a
523 523 new type and that type will be instantiated to represent the local
524 524 repository.
525 525
526 526 The factory functions each receive various state that may be consulted
527 527 as part of deriving a type.
528 528
529 529 Extensions should wrap these factory functions to customize repository type
530 530 creation. Note that an extension's wrapped function may be called even if
531 531 that extension is not loaded for the repo being constructed. Extensions
532 532 should check if their ``__name__`` appears in the
533 533 ``extensionmodulenames`` set passed to the factory function and no-op if
534 534 not.
535 535 """
536 536 ui = baseui.copy()
537 537 # Prevent copying repo configuration.
538 538 ui.copy = baseui.copy
539 539
540 540 # Working directory VFS rooted at repository root.
541 541 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
542 542
543 543 # Main VFS for .hg/ directory.
544 544 hgpath = wdirvfs.join(b'.hg')
545 545 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
546 546     # Whether this repository is a shared one or not
547 547 shared = False
548 548     # If this repository is shared, the vfs pointing to the shared repo
549 549 sharedvfs = None
550 550
551 551 # The .hg/ path should exist and should be a directory. All other
552 552 # cases are errors.
553 553 if not hgvfs.isdir():
554 554 try:
555 555 hgvfs.stat()
556 556 except OSError as e:
557 557 if e.errno != errno.ENOENT:
558 558 raise
559 559 except ValueError as e:
560 560 # Can be raised on Python 3.8 when path is invalid.
561 561 raise error.Abort(
562 562 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
563 563 )
564 564
565 565 raise error.RepoError(_(b'repository %s not found') % path)
566 566
567 567 requirements = _readrequires(hgvfs, True)
568 568 shared = (
569 569 requirementsmod.SHARED_REQUIREMENT in requirements
570 570 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
571 571 )
572 572 storevfs = None
573 573 if shared:
574 574 # This is a shared repo
575 575 sharedvfs = _getsharedvfs(hgvfs, requirements)
576 576 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
577 577 else:
578 578 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
579 579
580 580 # if .hg/requires contains the sharesafe requirement, it means
581 581 # there exists a `.hg/store/requires` too and we should read it
582 582     # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
583 583     # is present. We never write SHARESAFE_REQUIREMENT for a repo if the store
584 584     # is not present; refer to checkrequirementscompat() for that
585 585 #
586 586 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
587 587 # repository was shared the old way. We check the share source .hg/requires
588 588 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
589 589 # to be reshared
590 590 hint = _(b"see `hg help config.format.use-share-safe` for more information")
591 591 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
592 592
593 593 if (
594 594 shared
595 595 and requirementsmod.SHARESAFE_REQUIREMENT
596 596 not in _readrequires(sharedvfs, True)
597 597 ):
598 598 mismatch_warn = ui.configbool(
599 599 b'share', b'safe-mismatch.source-not-safe.warn'
600 600 )
601 601 mismatch_config = ui.config(
602 602 b'share', b'safe-mismatch.source-not-safe'
603 603 )
604 604 if mismatch_config in (
605 605 b'downgrade-allow',
606 606 b'allow',
607 607 b'downgrade-abort',
608 608 ):
609 609 # prevent cyclic import localrepo -> upgrade -> localrepo
610 610 from . import upgrade
611 611
612 612 upgrade.downgrade_share_to_non_safe(
613 613 ui,
614 614 hgvfs,
615 615 sharedvfs,
616 616 requirements,
617 617 mismatch_config,
618 618 mismatch_warn,
619 619 )
620 620 elif mismatch_config == b'abort':
621 621 raise error.Abort(
622 622 _(b"share source does not support share-safe requirement"),
623 623 hint=hint,
624 624 )
625 625 else:
626 626 raise error.Abort(
627 627 _(
628 628 b"share-safe mismatch with source.\nUnrecognized"
629 629 b" value '%s' of `share.safe-mismatch.source-not-safe`"
630 630 b" set."
631 631 )
632 632 % mismatch_config,
633 633 hint=hint,
634 634 )
635 635 else:
636 636 requirements |= _readrequires(storevfs, False)
637 637 elif shared:
638 638 sourcerequires = _readrequires(sharedvfs, False)
639 639 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
640 640 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
641 641 mismatch_warn = ui.configbool(
642 642 b'share', b'safe-mismatch.source-safe.warn'
643 643 )
644 644 if mismatch_config in (
645 645 b'upgrade-allow',
646 646 b'allow',
647 647 b'upgrade-abort',
648 648 ):
649 649 # prevent cyclic import localrepo -> upgrade -> localrepo
650 650 from . import upgrade
651 651
652 652 upgrade.upgrade_share_to_safe(
653 653 ui,
654 654 hgvfs,
655 655 storevfs,
656 656 requirements,
657 657 mismatch_config,
658 658 mismatch_warn,
659 659 )
660 660 elif mismatch_config == b'abort':
661 661 raise error.Abort(
662 662 _(
663 663 b'version mismatch: source uses share-safe'
664 664 b' functionality while the current share does not'
665 665 ),
666 666 hint=hint,
667 667 )
668 668 else:
669 669 raise error.Abort(
670 670 _(
671 671 b"share-safe mismatch with source.\nUnrecognized"
672 672 b" value '%s' of `share.safe-mismatch.source-safe` set."
673 673 )
674 674 % mismatch_config,
675 675 hint=hint,
676 676 )
677 677
678 678 # The .hg/hgrc file may load extensions or contain config options
679 679 # that influence repository construction. Attempt to load it and
680 680 # process any new extensions that it may have pulled in.
681 681 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
682 682 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
683 683 extensions.loadall(ui)
684 684 extensions.populateui(ui)
685 685
686 686 # Set of module names of extensions loaded for this repository.
687 687 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
688 688
689 689 supportedrequirements = gathersupportedrequirements(ui)
690 690
691 691 # We first validate the requirements are known.
692 692 ensurerequirementsrecognized(requirements, supportedrequirements)
693 693
694 694 # Then we validate that the known set is reasonable to use together.
695 695 ensurerequirementscompatible(ui, requirements)
696 696
697 697 # TODO there are unhandled edge cases related to opening repositories with
698 698 # shared storage. If storage is shared, we should also test for requirements
699 699 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
700 700 # that repo, as that repo may load extensions needed to open it. This is a
701 701 # bit complicated because we don't want the other hgrc to overwrite settings
702 702 # in this hgrc.
703 703 #
704 704 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
705 705 # file when sharing repos. But if a requirement is added after the share is
706 706     # performed, thereby introducing a new requirement for the opener, we will
707 707     # not see that and could encounter a run-time error interacting with
708 708 # that shared store since it has an unknown-to-us requirement.
709 709
710 710 # At this point, we know we should be capable of opening the repository.
711 711 # Now get on with doing that.
712 712
713 713 features = set()
714 714
715 715 # The "store" part of the repository holds versioned data. How it is
716 716 # accessed is determined by various requirements. If `shared` or
717 717     # `relshared` requirements are present, this indicates the current repository
718 718     # is a share and the store exists in the path mentioned in `.hg/sharedpath`
719 719 if shared:
720 720 storebasepath = sharedvfs.base
721 721 cachepath = sharedvfs.join(b'cache')
722 722 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
723 723 else:
724 724 storebasepath = hgvfs.base
725 725 cachepath = hgvfs.join(b'cache')
726 726 wcachepath = hgvfs.join(b'wcache')
727 727
728 728 # The store has changed over time and the exact layout is dictated by
729 729 # requirements. The store interface abstracts differences across all
730 730 # of them.
731 731 store = makestore(
732 732 requirements,
733 733 storebasepath,
734 734 lambda base: vfsmod.vfs(base, cacheaudited=True),
735 735 )
736 736 hgvfs.createmode = store.createmode
737 737
738 738 storevfs = store.vfs
739 739 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
740 740
741 741 # The cache vfs is used to manage cache files.
742 742 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
743 743 cachevfs.createmode = store.createmode
744 744     # The wcache vfs is used to manage cache files related to the working copy
745 745 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
746 746 wcachevfs.createmode = store.createmode
747 747
748 748 # Now resolve the type for the repository object. We do this by repeatedly
749 749     # calling a factory function to produce types for specific aspects of the
750 750 # repo's operation. The aggregate returned types are used as base classes
751 751 # for a dynamically-derived type, which will represent our new repository.
752 752
753 753 bases = []
754 754 extrastate = {}
755 755
756 756 for iface, fn in REPO_INTERFACES:
757 757 # We pass all potentially useful state to give extensions tons of
758 758 # flexibility.
759 759 typ = fn()(
760 760 ui=ui,
761 761 intents=intents,
762 762 requirements=requirements,
763 763 features=features,
764 764 wdirvfs=wdirvfs,
765 765 hgvfs=hgvfs,
766 766 store=store,
767 767 storevfs=storevfs,
768 768 storeoptions=storevfs.options,
769 769 cachevfs=cachevfs,
770 770 wcachevfs=wcachevfs,
771 771 extensionmodulenames=extensionmodulenames,
772 772 extrastate=extrastate,
773 773 baseclasses=bases,
774 774 )
775 775
776 776 if not isinstance(typ, type):
777 777 raise error.ProgrammingError(
778 778 b'unable to construct type for %s' % iface
779 779 )
780 780
781 781 bases.append(typ)
782 782
783 783 # type() allows you to use characters in type names that wouldn't be
784 784 # recognized as Python symbols in source code. We abuse that to add
785 785 # rich information about our constructed repo.
786 786 name = pycompat.sysstr(
787 787 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
788 788 )
789 789
790 790 cls = type(name, tuple(bases), {})
791 791
792 792 return cls(
793 793 baseui=baseui,
794 794 ui=ui,
795 795 origroot=path,
796 796 wdirvfs=wdirvfs,
797 797 hgvfs=hgvfs,
798 798 requirements=requirements,
799 799 supportedrequirements=supportedrequirements,
800 800 sharedpath=storebasepath,
801 801 store=store,
802 802 cachevfs=cachevfs,
803 803 wcachevfs=wcachevfs,
804 804 features=features,
805 805 intents=intents,
806 806 )
807 807
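# Hypothetical wrapping sketch (extension code; ``mymixin`` is made up): per
# the ``makelocalrepository`` docstring, extensions customize the repository
# type by wrapping the factory functions, e.g. mixing an extra base class
# into the file-storage type. The lambdas in ``REPO_INTERFACES`` further
# below look the module-level functions up at call time, which is what
# makes such wrapping effective.
#
#     from mercurial import extensions, localrepo
#
#     class mymixin(object):
#         pass  # extra file-storage behavior would live here
#
#     def wrapfilestorage(orig, requirements, features, **kwargs):
#         cls = orig(requirements=requirements, features=features, **kwargs)
#         return type('wrappedfilestorage', (mymixin, cls), {})
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'makefilestorage', wrapfilestorage)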
808 808
809 809 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
810 810 """Load hgrc files/content into a ui instance.
811 811
812 812 This is called during repository opening to load any additional
813 813 config files or settings relevant to the current repository.
814 814
815 815 Returns a bool indicating whether any additional configs were loaded.
816 816
817 817 Extensions should monkeypatch this function to modify how per-repo
818 818 configs are loaded. For example, an extension may wish to pull in
819 819 configs from alternate files or sources.
820 820
821 821     sharedvfs is a vfs object pointing to the source repo if the current one
822 822     is a shared one
823 823 """
824 824 if not rcutil.use_repo_hgrc():
825 825 return False
826 826
827 827 ret = False
828 828     # first load config from the shared source if we have to
829 829 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
830 830 try:
831 831 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
832 832 ret = True
833 833 except IOError:
834 834 pass
835 835
836 836 try:
837 837 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
838 838 ret = True
839 839 except IOError:
840 840 pass
841 841
842 842 try:
843 843 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
844 844 ret = True
845 845 except IOError:
846 846 pass
847 847
848 848 return ret
849 849
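# Hypothetical monkeypatch sketch (extension code; the extra filename is
# made up): per ``loadhgrc``'s docstring, an extension can wrap it to pull
# configuration from an additional source.
#
#     from mercurial import extensions, localrepo
#
#     def myloadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args):
#         ret = orig(ui, wdirvfs, hgvfs, requirements, *args)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             ret = True
#         except IOError:
#             pass
#         return ret
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'loadhgrc', myloadhgrc)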
850 850
851 851 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
852 852 """Perform additional actions after .hg/hgrc is loaded.
853 853
854 854 This function is called during repository loading immediately after
855 855 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
856 856
857 857 The function can be used to validate configs, automatically add
858 858 options (including extensions) based on requirements, etc.
859 859 """
860 860
861 861     # Map of requirements to lists of extensions to load automatically when
862 862     # the requirement is present.
863 863 autoextensions = {
864 864 b'git': [b'git'],
865 865 b'largefiles': [b'largefiles'],
866 866 b'lfs': [b'lfs'],
867 867 }
868 868
869 869 for requirement, names in sorted(autoextensions.items()):
870 870 if requirement not in requirements:
871 871 continue
872 872
873 873 for name in names:
874 874 if not ui.hasconfig(b'extensions', name):
875 875 ui.setconfig(b'extensions', name, b'', source=b'autoload')
876 876
877 877
878 878 def gathersupportedrequirements(ui):
879 879 """Determine the complete set of recognized requirements."""
880 880 # Start with all requirements supported by this file.
881 881 supported = set(localrepository._basesupported)
882 882
883 883 # Execute ``featuresetupfuncs`` entries if they belong to an extension
884 884 # relevant to this ui instance.
885 885 modules = {m.__name__ for n, m in extensions.extensions(ui)}
886 886
887 887 for fn in featuresetupfuncs:
888 888 if fn.__module__ in modules:
889 889 fn(ui, supported)
890 890
891 891 # Add derived requirements from registered compression engines.
892 892 for name in util.compengines:
893 893 engine = util.compengines[name]
894 894 if engine.available() and engine.revlogheader():
895 895 supported.add(b'exp-compression-%s' % name)
896 896 if engine.name() == b'zstd':
897 897 supported.add(b'revlog-compression-zstd')
898 898
899 899 return supported
900 900
901 901
902 902 def ensurerequirementsrecognized(requirements, supported):
903 903 """Validate that a set of local requirements is recognized.
904 904
905 905 Receives a set of requirements. Raises an ``error.RepoError`` if there
906 906 exists any requirement in that set that currently loaded code doesn't
907 907 recognize.
908 908
909 909     Returns nothing on success; failures are reported by raising.
910 910 """
911 911 missing = set()
912 912
913 913 for requirement in requirements:
914 914 if requirement in supported:
915 915 continue
916 916
917 917 if not requirement or not requirement[0:1].isalnum():
918 918 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
919 919
920 920 missing.add(requirement)
921 921
922 922 if missing:
923 923 raise error.RequirementError(
924 924 _(b'repository requires features unknown to this Mercurial: %s')
925 925 % b' '.join(sorted(missing)),
926 926 hint=_(
927 927 b'see https://mercurial-scm.org/wiki/MissingRequirement '
928 928 b'for more information'
929 929 ),
930 930 )
931 931
932 932
933 933 def ensurerequirementscompatible(ui, requirements):
934 934 """Validates that a set of recognized requirements is mutually compatible.
935 935
936 936 Some requirements may not be compatible with others or require
937 937 config options that aren't enabled. This function is called during
938 938 repository opening to ensure that the set of requirements needed
939 939 to open a repository is sane and compatible with config options.
940 940
941 941 Extensions can monkeypatch this function to perform additional
942 942 checking.
943 943
944 944 ``error.RepoError`` should be raised on failure.
945 945 """
946 946 if (
947 947 requirementsmod.SPARSE_REQUIREMENT in requirements
948 948 and not sparse.enabled
949 949 ):
950 950 raise error.RepoError(
951 951 _(
952 952 b'repository is using sparse feature but '
953 953 b'sparse is not enabled; enable the '
954 954                 b'"sparse" extension to access'
955 955 )
956 956 )
957 957
958 958
959 959 def makestore(requirements, path, vfstype):
960 960 """Construct a storage object for a repository."""
961 961 if requirementsmod.STORE_REQUIREMENT in requirements:
962 962 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
963 963 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
964 964 return storemod.fncachestore(path, vfstype, dotencode)
965 965
966 966 return storemod.encodedstore(path, vfstype)
967 967
968 968 return storemod.basicstore(path, vfstype)
969 969
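# For reference, the requirement-to-store mapping implemented above
# (requirement names as stored in ``.hg/requires``):
#
#     {store, fncache, dotencode} -> fncachestore with dotencode=True
#     {store, fncache}            -> fncachestore with dotencode=False
#     {store}                     -> encodedstore
#     no 'store' requirement      -> basicstore (very old repositories)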
970 970
971 971 def resolvestorevfsoptions(ui, requirements, features):
972 972 """Resolve the options to pass to the store vfs opener.
973 973
974 974 The returned dict is used to influence behavior of the storage layer.
975 975 """
976 976 options = {}
977 977
978 978 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
979 979 options[b'treemanifest'] = True
980 980
981 981 # experimental config: format.manifestcachesize
982 982 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
983 983 if manifestcachesize is not None:
984 984 options[b'manifestcachesize'] = manifestcachesize
985 985
986 986 # In the absence of another requirement superseding a revlog-related
987 987 # requirement, we have to assume the repo is using revlog version 0.
988 988 # This revlog format is super old and we don't bother trying to parse
989 989 # opener options for it because those options wouldn't do anything
990 990 # meaningful on such old repos.
991 991 if (
992 992 requirementsmod.REVLOGV1_REQUIREMENT in requirements
993 993 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
994 994 ):
995 995 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
996 996 else: # explicitly mark repo as using revlogv0
997 997 options[b'revlogv0'] = True
998 998
999 999 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1000 1000 options[b'copies-storage'] = b'changeset-sidedata'
1001 1001 else:
1002 1002 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1003 1003 copiesextramode = (b'changeset-only', b'compatibility')
1004 1004 if writecopiesto in copiesextramode:
1005 1005 options[b'copies-storage'] = b'extra'
1006 1006
1007 1007 return options
1008 1008
1009 1009
1010 1010 def resolverevlogstorevfsoptions(ui, requirements, features):
1011 1011 """Resolve opener options specific to revlogs."""
1012 1012
1013 1013 options = {}
1014 1014 options[b'flagprocessors'] = {}
1015 1015
1016 1016 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1017 1017 options[b'revlogv1'] = True
1018 1018 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1019 1019 options[b'revlogv2'] = True
1020 1020
1021 1021 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1022 1022 options[b'generaldelta'] = True
1023 1023
1024 1024 # experimental config: format.chunkcachesize
1025 1025 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1026 1026 if chunkcachesize is not None:
1027 1027 options[b'chunkcachesize'] = chunkcachesize
1028 1028
1029 1029 deltabothparents = ui.configbool(
1030 1030 b'storage', b'revlog.optimize-delta-parent-choice'
1031 1031 )
1032 1032 options[b'deltabothparents'] = deltabothparents
1033 1033
1034 1034 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1035 1035 lazydeltabase = False
1036 1036 if lazydelta:
1037 1037 lazydeltabase = ui.configbool(
1038 1038 b'storage', b'revlog.reuse-external-delta-parent'
1039 1039 )
1040 1040 if lazydeltabase is None:
1041 1041 lazydeltabase = not scmutil.gddeltaconfig(ui)
1042 1042 options[b'lazydelta'] = lazydelta
1043 1043 options[b'lazydeltabase'] = lazydeltabase
1044 1044
1045 1045 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1046 1046 if 0 <= chainspan:
1047 1047 options[b'maxdeltachainspan'] = chainspan
1048 1048
1049 1049 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1050 1050 if mmapindexthreshold is not None:
1051 1051 options[b'mmapindexthreshold'] = mmapindexthreshold
1052 1052
1053 1053 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1054 1054 srdensitythres = float(
1055 1055 ui.config(b'experimental', b'sparse-read.density-threshold')
1056 1056 )
1057 1057 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1058 1058 options[b'with-sparse-read'] = withsparseread
1059 1059 options[b'sparse-read-density-threshold'] = srdensitythres
1060 1060 options[b'sparse-read-min-gap-size'] = srmingapsize
1061 1061
1062 1062 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1063 1063 options[b'sparse-revlog'] = sparserevlog
1064 1064 if sparserevlog:
1065 1065 options[b'generaldelta'] = True
1066 1066
1067 1067 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
1068 1068 options[b'side-data'] = sidedata
1069 1069
1070 1070 maxchainlen = None
1071 1071 if sparserevlog:
1072 1072 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1073 1073 # experimental config: format.maxchainlen
1074 1074 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1075 1075 if maxchainlen is not None:
1076 1076 options[b'maxchainlen'] = maxchainlen
1077 1077
1078 1078 for r in requirements:
1079 1079         # we allow multiple compression engine requirements to co-exist because
1080 1080         # strictly speaking, revlog seems to support mixed compression styles.
1081 1081 #
1082 1082 # The compression used for new entries will be "the last one"
1083 1083 prefix = r.startswith
1084 1084 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1085 1085 options[b'compengine'] = r.split(b'-', 2)[2]
1086 1086
1087 1087 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1088 1088 if options[b'zlib.level'] is not None:
1089 1089 if not (0 <= options[b'zlib.level'] <= 9):
1090 1090 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1091 1091 raise error.Abort(msg % options[b'zlib.level'])
1092 1092 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1093 1093 if options[b'zstd.level'] is not None:
1094 1094 if not (0 <= options[b'zstd.level'] <= 22):
1095 1095 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1096 1096 raise error.Abort(msg % options[b'zstd.level'])
1097 1097
1098 1098 if requirementsmod.NARROW_REQUIREMENT in requirements:
1099 1099 options[b'enableellipsis'] = True
1100 1100
1101 1101 if ui.configbool(b'experimental', b'rust.index'):
1102 1102 options[b'rust.index'] = True
1103 1103 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1104 1104 slow_path = ui.config(
1105 1105 b'storage', b'revlog.persistent-nodemap.slow-path'
1106 1106 )
1107 1107 if slow_path not in (b'allow', b'warn', b'abort'):
1108 1108 default = ui.config_default(
1109 1109 b'storage', b'revlog.persistent-nodemap.slow-path'
1110 1110 )
1111 1111 msg = _(
1112 1112 b'unknown value for config '
1113 1113 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1114 1114 )
1115 1115 ui.warn(msg % slow_path)
1116 1116 if not ui.quiet:
1117 1117 ui.warn(_(b'falling back to default value: %s\n') % default)
1118 1118 slow_path = default
1119 1119
1120 1120 msg = _(
1121 1121 b"accessing `persistent-nodemap` repository without associated "
1122 1122 b"fast implementation."
1123 1123 )
1124 1124 hint = _(
1125 1125 b"check `hg help config.format.use-persistent-nodemap` "
1126 1126 b"for details"
1127 1127 )
1128 1128 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1129 1129 if slow_path == b'warn':
1130 1130 msg = b"warning: " + msg + b'\n'
1131 1131 ui.warn(msg)
1132 1132 if not ui.quiet:
1133 1133 hint = b'(' + hint + b')\n'
1134 1134 ui.warn(hint)
1135 1135 if slow_path == b'abort':
1136 1136 raise error.Abort(msg, hint=hint)
1137 1137 options[b'persistent-nodemap'] = True
1138 1138 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1139 1139 options[b'persistent-nodemap.mmap'] = True
1140 1140 if ui.configbool(b'devel', b'persistent-nodemap'):
1141 1141 options[b'devel-force-nodemap'] = True
1142 1142
1143 1143 return options
1144 1144
1145 1145
1146 1146 def makemain(**kwargs):
1147 1147 """Produce a type conforming to ``ilocalrepositorymain``."""
1148 1148 return localrepository
1149 1149
1150 1150
1151 1151 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1152 1152 class revlogfilestorage(object):
1153 1153 """File storage when using revlogs."""
1154 1154
1155 1155 def file(self, path):
1156 1156 if path.startswith(b'/'):
1157 1157 path = path[1:]
1158 1158
1159 1159 return filelog.filelog(self.svfs, path)
1160 1160
1161 1161
1162 1162 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1163 1163 class revlognarrowfilestorage(object):
1164 1164 """File storage when using revlogs and narrow files."""
1165 1165
1166 1166 def file(self, path):
1167 1167 if path.startswith(b'/'):
1168 1168 path = path[1:]
1169 1169
1170 1170 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1171 1171
1172 1172
1173 1173 def makefilestorage(requirements, features, **kwargs):
1174 1174 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1175 1175 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1176 1176 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1177 1177
1178 1178 if requirementsmod.NARROW_REQUIREMENT in requirements:
1179 1179 return revlognarrowfilestorage
1180 1180 else:
1181 1181 return revlogfilestorage
1182 1182
1183 1183
1184 1184 # List of repository interfaces and factory functions for them. Each
1185 1185 # will be called in order during ``makelocalrepository()`` to iteratively
1186 1186 # derive the final type for a local repository instance. We capture the
1187 1187 # function as a lambda so we don't hold a reference and the module-level
1188 1188 # functions can be wrapped.
1189 1189 REPO_INTERFACES = [
1190 1190 (repository.ilocalrepositorymain, lambda: makemain),
1191 1191 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1192 1192 ]
1193 1193
1194 1194
1195 1195 @interfaceutil.implementer(repository.ilocalrepositorymain)
1196 1196 class localrepository(object):
1197 1197 """Main class for representing local repositories.
1198 1198
1199 1199 All local repositories are instances of this class.
1200 1200
1201 1201 Constructed on its own, instances of this class are not usable as
1202 1202 repository objects. To obtain a usable repository object, call
1203 1203 ``hg.repository()``, ``localrepo.instance()``, or
1204 1204 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1205 1205 ``instance()`` adds support for creating new repositories.
1206 1206 ``hg.repository()`` adds more extension integration, including calling
1207 1207 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1208 1208 used.
1209 1209 """
1210 1210
1211 1211 # obsolete experimental requirements:
1212 1212 # - manifestv2: An experimental new manifest format that allowed
1213 1213 # for stem compression of long paths. Experiment ended up not
1214 1214 # being successful (repository sizes went up due to worse delta
1215 1215 # chains), and the code was deleted in 4.6.
1216 1216 supportedformats = {
1217 1217 requirementsmod.REVLOGV1_REQUIREMENT,
1218 1218 requirementsmod.GENERALDELTA_REQUIREMENT,
1219 1219 requirementsmod.TREEMANIFEST_REQUIREMENT,
1220 1220 requirementsmod.COPIESSDC_REQUIREMENT,
1221 1221 requirementsmod.REVLOGV2_REQUIREMENT,
1222 1222 requirementsmod.SIDEDATA_REQUIREMENT,
1223 1223 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1224 1224 requirementsmod.NODEMAP_REQUIREMENT,
1225 1225 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1226 1226 requirementsmod.SHARESAFE_REQUIREMENT,
1227 1227 }
1228 1228 _basesupported = supportedformats | {
1229 1229 requirementsmod.STORE_REQUIREMENT,
1230 1230 requirementsmod.FNCACHE_REQUIREMENT,
1231 1231 requirementsmod.SHARED_REQUIREMENT,
1232 1232 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1233 1233 requirementsmod.DOTENCODE_REQUIREMENT,
1234 1234 requirementsmod.SPARSE_REQUIREMENT,
1235 1235 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1236 1236 }
1237 1237
1238 1238     # list of prefixes for files which can be written without 'wlock'
1239 1239 # Extensions should extend this list when needed
1240 1240 _wlockfreeprefix = {
1241 1241         # We might consider requiring 'wlock' for the next
1242 1242         # two, but pretty much all the existing code assumes
1243 1243 # wlock is not needed so we keep them excluded for
1244 1244 # now.
1245 1245 b'hgrc',
1246 1246 b'requires',
1247 1247         # XXX cache is a complicated business; someone
1248 1248 # should investigate this in depth at some point
1249 1249 b'cache/',
1250 1250         # XXX shouldn't the dirstate be covered by the wlock?
1251 1251 b'dirstate',
1252 1252 # XXX bisect was still a bit too messy at the time
1253 1253 # this changeset was introduced. Someone should fix
1254 1254         # the remaining bit and drop this line
1255 1255 b'bisect.state',
1256 1256 }
1257 1257
1258 1258 def __init__(
1259 1259 self,
1260 1260 baseui,
1261 1261 ui,
1262 1262 origroot,
1263 1263 wdirvfs,
1264 1264 hgvfs,
1265 1265 requirements,
1266 1266 supportedrequirements,
1267 1267 sharedpath,
1268 1268 store,
1269 1269 cachevfs,
1270 1270 wcachevfs,
1271 1271 features,
1272 1272 intents=None,
1273 1273 ):
1274 1274 """Create a new local repository instance.
1275 1275
1276 1276 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1277 1277 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1278 1278 object.
1279 1279
1280 1280 Arguments:
1281 1281
1282 1282 baseui
1283 1283 ``ui.ui`` instance that ``ui`` argument was based off of.
1284 1284
1285 1285 ui
1286 1286 ``ui.ui`` instance for use by the repository.
1287 1287
1288 1288 origroot
1289 1289 ``bytes`` path to working directory root of this repository.
1290 1290
1291 1291 wdirvfs
1292 1292 ``vfs.vfs`` rooted at the working directory.
1293 1293
1294 1294 hgvfs
1295 1295 ``vfs.vfs`` rooted at .hg/
1296 1296
1297 1297 requirements
1298 1298 ``set`` of bytestrings representing repository opening requirements.
1299 1299
1300 1300 supportedrequirements
1301 1301 ``set`` of bytestrings representing repository requirements that we
1302 1302         know how to open. May be a superset of ``requirements``.
1303 1303
1304 1304 sharedpath
1305 1305 ``bytes`` Defining path to storage base directory. Points to a
1306 1306 ``.hg/`` directory somewhere.
1307 1307
1308 1308 store
1309 1309 ``store.basicstore`` (or derived) instance providing access to
1310 1310 versioned storage.
1311 1311
1312 1312 cachevfs
1313 1313 ``vfs.vfs`` used for cache files.
1314 1314
1315 1315 wcachevfs
1316 1316 ``vfs.vfs`` used for cache files related to the working copy.
1317 1317
1318 1318 features
1319 1319 ``set`` of bytestrings defining features/capabilities of this
1320 1320 instance.
1321 1321
1322 1322 intents
1323 1323 ``set`` of system strings indicating what this repo will be used
1324 1324 for.
1325 1325 """
1326 1326 self.baseui = baseui
1327 1327 self.ui = ui
1328 1328 self.origroot = origroot
1329 1329 # vfs rooted at working directory.
1330 1330 self.wvfs = wdirvfs
1331 1331 self.root = wdirvfs.base
1332 1332 # vfs rooted at .hg/. Used to access most non-store paths.
1333 1333 self.vfs = hgvfs
1334 1334 self.path = hgvfs.base
1335 1335 self.requirements = requirements
1336 1336 self.nodeconstants = sha1nodeconstants
1337 1337 self.nullid = self.nodeconstants.nullid
1338 1338 self.supported = supportedrequirements
1339 1339 self.sharedpath = sharedpath
1340 1340 self.store = store
1341 1341 self.cachevfs = cachevfs
1342 1342 self.wcachevfs = wcachevfs
1343 1343 self.features = features
1344 1344
1345 1345 self.filtername = None
1346 1346
1347 1347 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1348 1348 b'devel', b'check-locks'
1349 1349 ):
1350 1350 self.vfs.audit = self._getvfsward(self.vfs.audit)
1351 1351         # A list of callbacks to shape the phase if no data were found.
1352 1352         # Callbacks are in the form: func(repo, roots) --> processed root.
1353 1353         # This list is to be filled by extensions during repo setup
1354 1354 self._phasedefaults = []
1355 1355
1356 1356 color.setup(self.ui)
1357 1357
1358 1358 self.spath = self.store.path
1359 1359 self.svfs = self.store.vfs
1360 1360 self.sjoin = self.store.join
1361 1361 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1362 1362 b'devel', b'check-locks'
1363 1363 ):
1364 1364 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1365 1365 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1366 1366 else: # standard vfs
1367 1367 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1368 1368
1369 1369 self._dirstatevalidatewarned = False
1370 1370
1371 1371 self._branchcaches = branchmap.BranchMapCache()
1372 1372 self._revbranchcache = None
1373 1373 self._filterpats = {}
1374 1374 self._datafilters = {}
1375 1375 self._transref = self._lockref = self._wlockref = None
1376 1376
1377 1377         # A cache for various files under .hg/ that tracks file changes
1378 1378         # (used by the filecache decorator)
1379 1379 #
1380 1380 # Maps a property name to its util.filecacheentry
1381 1381 self._filecache = {}
1382 1382
1383 1383         # hold sets of revisions to be filtered
1384 1384 # should be cleared when something might have changed the filter value:
1385 1385 # - new changesets,
1386 1386 # - phase change,
1387 1387 # - new obsolescence marker,
1388 1388 # - working directory parent change,
1389 1389 # - bookmark changes
1390 1390 self.filteredrevcache = {}
1391 1391
1392 1392 # post-dirstate-status hooks
1393 1393 self._postdsstatus = []
1394 1394
1395 1395 # generic mapping between names and nodes
1396 1396 self.names = namespaces.namespaces()
1397 1397
1398 1398 # Key to signature value.
1399 1399 self._sparsesignaturecache = {}
1400 1400 # Signature to cached matcher instance.
1401 1401 self._sparsematchercache = {}
1402 1402
1403 1403 self._extrafilterid = repoview.extrafilter(ui)
1404 1404
1405 1405 self.filecopiesmode = None
1406 1406 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1407 1407 self.filecopiesmode = b'changeset-sidedata'
1408 1408
1409 1409 self._wanted_sidedata = set()
1410 1410 self._sidedata_computers = {}
1411 1411 metadatamod.set_sidedata_spec_for_repo(self)
1412 1412
1413 1413 def _getvfsward(self, origfunc):
1414 1414 """build a ward for self.vfs"""
1415 1415 rref = weakref.ref(self)
1416 1416
1417 1417 def checkvfs(path, mode=None):
1418 1418 ret = origfunc(path, mode=mode)
1419 1419 repo = rref()
1420 1420 if (
1421 1421 repo is None
1422 1422 or not util.safehasattr(repo, b'_wlockref')
1423 1423 or not util.safehasattr(repo, b'_lockref')
1424 1424 ):
1425 1425 return
1426 1426 if mode in (None, b'r', b'rb'):
1427 1427 return
1428 1428 if path.startswith(repo.path):
1429 1429 # truncate name relative to the repository (.hg)
1430 1430 path = path[len(repo.path) + 1 :]
1431 1431 if path.startswith(b'cache/'):
1432 1432 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1433 1433 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1434 1434 # path prefixes covered by 'lock'
1435 1435 vfs_path_prefixes = (
1436 1436 b'journal.',
1437 1437 b'undo.',
1438 1438 b'strip-backup/',
1439 1439 b'cache/',
1440 1440 )
1441 1441 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1442 1442 if repo._currentlock(repo._lockref) is None:
1443 1443 repo.ui.develwarn(
1444 1444 b'write with no lock: "%s"' % path,
1445 1445 stacklevel=3,
1446 1446 config=b'check-locks',
1447 1447 )
1448 1448 elif repo._currentlock(repo._wlockref) is None:
1449 1449 # rest of vfs files are covered by 'wlock'
1450 1450 #
1451 1451 # exclude special files
1452 1452 for prefix in self._wlockfreeprefix:
1453 1453 if path.startswith(prefix):
1454 1454 return
1455 1455 repo.ui.develwarn(
1456 1456 b'write with no wlock: "%s"' % path,
1457 1457 stacklevel=3,
1458 1458 config=b'check-locks',
1459 1459 )
1460 1460 return ret
1461 1461
1462 1462 return checkvfs
1463 1463
1464 1464 def _getsvfsward(self, origfunc):
1465 1465 """build a ward for self.svfs"""
1466 1466 rref = weakref.ref(self)
1467 1467
1468 1468 def checksvfs(path, mode=None):
1469 1469 ret = origfunc(path, mode=mode)
1470 1470 repo = rref()
1471 1471 if repo is None or not util.safehasattr(repo, b'_lockref'):
1472 1472 return
1473 1473 if mode in (None, b'r', b'rb'):
1474 1474 return
1475 1475 if path.startswith(repo.sharedpath):
1476 1476 # truncate name relative to the repository (.hg)
1477 1477 path = path[len(repo.sharedpath) + 1 :]
1478 1478 if repo._currentlock(repo._lockref) is None:
1479 1479 repo.ui.develwarn(
1480 1480 b'write with no lock: "%s"' % path, stacklevel=4
1481 1481 )
1482 1482 return ret
1483 1483
1484 1484 return checksvfs
1485 1485
1486 1486 def close(self):
1487 1487 self._writecaches()
1488 1488
1489 1489 def _writecaches(self):
1490 1490 if self._revbranchcache:
1491 1491 self._revbranchcache.write()
1492 1492
1493 1493 def _restrictcapabilities(self, caps):
1494 1494 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1495 1495 caps = set(caps)
1496 1496 capsblob = bundle2.encodecaps(
1497 1497 bundle2.getrepocaps(self, role=b'client')
1498 1498 )
1499 1499 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1500 1500 if self.ui.configbool(b'experimental', b'narrow'):
1501 1501 caps.add(wireprototypes.NARROWCAP)
1502 1502 return caps
1503 1503
1504 1504     # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1505 1505 # self -> auditor -> self._checknested -> self
1506 1506
1507 1507 @property
1508 1508 def auditor(self):
1509 1509 # This is only used by context.workingctx.match in order to
1510 1510 # detect files in subrepos.
1511 1511 return pathutil.pathauditor(self.root, callback=self._checknested)
1512 1512
1513 1513 @property
1514 1514 def nofsauditor(self):
1515 1515 # This is only used by context.basectx.match in order to detect
1516 1516 # files in subrepos.
1517 1517 return pathutil.pathauditor(
1518 1518 self.root, callback=self._checknested, realfs=False, cached=True
1519 1519 )
1520 1520
1521 1521 def _checknested(self, path):
1522 1522 """Determine if path is a legal nested repository."""
1523 1523 if not path.startswith(self.root):
1524 1524 return False
1525 1525 subpath = path[len(self.root) + 1 :]
1526 1526 normsubpath = util.pconvert(subpath)
1527 1527
1528 1528 # XXX: Checking against the current working copy is wrong in
1529 1529 # the sense that it can reject things like
1530 1530 #
1531 1531 # $ hg cat -r 10 sub/x.txt
1532 1532 #
1533 1533 # if sub/ is no longer a subrepository in the working copy
1534 1534 # parent revision.
1535 1535 #
1536 1536 # However, it can of course also allow things that would have
1537 1537 # been rejected before, such as the above cat command if sub/
1538 1538 # is a subrepository now, but was a normal directory before.
1539 1539 # The old path auditor would have rejected by mistake since it
1540 1540 # panics when it sees sub/.hg/.
1541 1541 #
1542 1542 # All in all, checking against the working copy seems sensible
1543 1543 # since we want to prevent access to nested repositories on
1544 1544 # the filesystem *now*.
1545 1545 ctx = self[None]
1546 1546 parts = util.splitpath(subpath)
1547 1547 while parts:
1548 1548 prefix = b'/'.join(parts)
1549 1549 if prefix in ctx.substate:
1550 1550 if prefix == normsubpath:
1551 1551 return True
1552 1552 else:
1553 1553 sub = ctx.sub(prefix)
1554 1554 return sub.checknested(subpath[len(prefix) + 1 :])
1555 1555 else:
1556 1556 parts.pop()
1557 1557 return False
1558 1558
1559 1559 def peer(self):
1560 1560 return localpeer(self) # not cached to avoid reference cycle
1561 1561
1562 1562 def unfiltered(self):
1563 1563 """Return unfiltered version of the repository
1564 1564
1565 1565         Intended to be overridden by filtered repos."""
1566 1566 return self
1567 1567
1568 1568 def filtered(self, name, visibilityexceptions=None):
1569 1569 """Return a filtered version of a repository
1570 1570
1571 1571 The `name` parameter is the identifier of the requested view. This
1572 1572 will return a repoview object set "exactly" to the specified view.
1573 1573
1574 1574 This function does not apply recursive filtering to a repository. For
1575 1575         example, calling `repo.filtered("served")` will return a repoview using
1576 1576 the "served" view, regardless of the initial view used by `repo`.
1577 1577
1578 1578         In other words, there is always only one level of `repoview` "filtering".
1579 1579 """
1580 1580 if self._extrafilterid is not None and b'%' not in name:
1581 1581 name = name + b'%' + self._extrafilterid
1582 1582
1583 1583 cls = repoview.newtype(self.unfiltered().__class__)
1584 1584 return cls(self, name, visibilityexceptions)
1585 1585
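    # Usage sketch for ``filtered()`` above (hypothetical caller code;
    # ``repo`` assumed in scope): filtering is not recursive, so both calls
    # below yield a plain "served" view regardless of the starting view.
    #
    #     served = repo.filtered(b'served')
    #     also_served = served.filtered(b'served')  # same single level
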
1586 1586 @mixedrepostorecache(
1587 1587 (b'bookmarks', b'plain'),
1588 1588 (b'bookmarks.current', b'plain'),
1589 1589 (b'bookmarks', b''),
1590 1590 (b'00changelog.i', b''),
1591 1591 )
1592 1592 def _bookmarks(self):
1593 1593 # Since the multiple files involved in the transaction cannot be
1594 1594 # written atomically (with current repository format), there is a race
1595 1595 # condition here.
1596 1596 #
1597 1597 # 1) changelog content A is read
1598 1598         # 2) outside of any transaction, the changelog is updated to content B
1599 1599         # 3) outside of any transaction, the bookmark file is updated to refer to content B
1600 1600 # 4) bookmarks file content is read and filtered against changelog-A
1601 1601 #
1602 1602 # When this happens, bookmarks against nodes missing from A are dropped.
1603 1603 #
1604 1604         # Having this happen during a read is not great, but it becomes worse
1605 1605         # when it happens during a write because the bookmarks to the "unknown"
1606 1606         # nodes will be dropped for good. However, writes happen within locks.
1607 1607         # This locking makes it possible to have a race-free consistent read.
1608 1608         # For this purpose, data read from disk before locking is
1609 1609         # "invalidated" right after the locks are taken. These invalidations are
1610 1610         # "light": the `filecache` mechanism keeps the data in memory and will
1611 1611         # reuse it if the underlying files did not change. Not parsing the
1612 1612         # same data multiple times helps performance.
1613 1613 #
1614 1614         # Unfortunately, in the case described above, the files tracked by the
1615 1615 # bookmarks file cache might not have changed, but the in-memory
1616 1616 # content is still "wrong" because we used an older changelog content
1617 1617 # to process the on-disk data. So after locking, the changelog would be
1618 1618 # refreshed but `_bookmarks` would be preserved.
1619 1619         # Adding `00changelog.i` to the list of tracked files is not
1620 1620 # enough, because at the time we build the content for `_bookmarks` in
1621 1621 # (4), the changelog file has already diverged from the content used
1622 1622 # for loading `changelog` in (1)
1623 1623 #
1624 1624 # To prevent the issue, we force the changelog to be explicitly
1625 1625 # reloaded while computing `_bookmarks`. The data race can still happen
1626 1626 # without the lock (with a narrower window), but it would no longer go
1627 1627 # undetected during the lock time refresh.
1628 1628 #
1629 1629         # The new schedule is as follows:
1630 1630 #
1631 1631 # 1) filecache logic detect that `_bookmarks` needs to be computed
1632 1632 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1633 1633 # 3) We force `changelog` filecache to be tested
1634 1634 # 4) cachestat for `changelog` are captured (for changelog)
1635 1635 # 5) `_bookmarks` is computed and cached
1636 1636 #
1637 1637         # The step in (3) ensures we have a changelog at least as recent as the
1638 1638         # cache stat computed in (1). As a result, at locking time:
1639 1639         # * if the changelog did not change since (1) -> we can reuse the data
1640 1640 # * otherwise -> the bookmarks get refreshed.
1641 1641 self._refreshchangelog()
1642 1642 return bookmarks.bmstore(self)
1643 1643
1644 1644 def _refreshchangelog(self):
1645 1645         """make sure the in-memory changelog matches the on-disk one"""
1646 1646 if 'changelog' in vars(self) and self.currenttransaction() is None:
1647 1647 del self.changelog
1648 1648
1649 1649 @property
1650 1650 def _activebookmark(self):
1651 1651 return self._bookmarks.active
1652 1652
1653 1653 # _phasesets depend on the changelog. What we need is to call
1654 1654 # _phasecache.invalidate() if '00changelog.i' was changed, but that
1655 1655 # can't be easily expressed in the filecache mechanism.
1656 1656 @storecache(b'phaseroots', b'00changelog.i')
1657 1657 def _phasecache(self):
1658 1658 return phases.phasecache(self, self._phasedefaults)
1659 1659
1660 1660 @storecache(b'obsstore')
1661 1661 def obsstore(self):
1662 1662 return obsolete.makestore(self.ui, self)
1663 1663
1664 1664 @storecache(b'00changelog.i')
1665 1665 def changelog(self):
1666 1666 # load dirstate before changelog to avoid a race; see issue6303
1667 1667 self.dirstate.prefetch_parents()
1668 1668 return self.store.changelog(
1669 1669 txnutil.mayhavepending(self.root),
1670 1670 concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
1671 1671 )
1672 1672
1673 1673 @storecache(b'00manifest.i')
1674 1674 def manifestlog(self):
1675 1675 return self.store.manifestlog(self, self._storenarrowmatch)
1676 1676
1677 1677 @repofilecache(b'dirstate')
1678 1678 def dirstate(self):
1679 1679 return self._makedirstate()
1680 1680
1681 1681 def _makedirstate(self):
1682 1682 """Extension point for wrapping the dirstate per-repo."""
1683 1683 sparsematchfn = lambda: sparse.matcher(self)
1684 1684
1685 1685 return dirstate.dirstate(
1686 1686 self.vfs,
1687 1687 self.ui,
1688 1688 self.root,
1689 1689 self._dirstatevalidate,
1690 1690 sparsematchfn,
1691 1691 self.nodeconstants,
1692 1692 )
1693 1693
1694 1694 def _dirstatevalidate(self, node):
1695 1695 try:
1696 1696 self.changelog.rev(node)
1697 1697 return node
1698 1698 except error.LookupError:
1699 1699 if not self._dirstatevalidatewarned:
1700 1700 self._dirstatevalidatewarned = True
1701 1701 self.ui.warn(
1702 1702 _(b"warning: ignoring unknown working parent %s!\n")
1703 1703 % short(node)
1704 1704 )
1705 1705 return nullid
1706 1706
1707 1707 @storecache(narrowspec.FILENAME)
1708 1708 def narrowpats(self):
1709 1709 """matcher patterns for this repository's narrowspec
1710 1710
1711 1711 A tuple of (includes, excludes).
1712 1712 """
1713 1713 return narrowspec.load(self)
1714 1714
1715 1715 @storecache(narrowspec.FILENAME)
1716 1716 def _storenarrowmatch(self):
1717 1717 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1718 1718 return matchmod.always()
1719 1719 include, exclude = self.narrowpats
1720 1720 return narrowspec.match(self.root, include=include, exclude=exclude)
1721 1721
1722 1722 @storecache(narrowspec.FILENAME)
1723 1723 def _narrowmatch(self):
1724 1724 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1725 1725 return matchmod.always()
1726 1726 narrowspec.checkworkingcopynarrowspec(self)
1727 1727 include, exclude = self.narrowpats
1728 1728 return narrowspec.match(self.root, include=include, exclude=exclude)
1729 1729
1730 1730 def narrowmatch(self, match=None, includeexact=False):
1731 1731 """matcher corresponding the the repo's narrowspec
1732 1732
1733 1733 If `match` is given, then that will be intersected with the narrow
1734 1734 matcher.
1735 1735
1736 1736 If `includeexact` is True, then any exact matches from `match` will
1737 1737 be included even if they're outside the narrowspec.
1738 1738 """
1739 1739 if match:
1740 1740 if includeexact and not self._narrowmatch.always():
1741 1741 # do not exclude explicitly-specified paths, so that they
1742 1742 # can be warned about later on
1743 1743 em = matchmod.exact(match.files())
1744 1744 nm = matchmod.unionmatcher([self._narrowmatch, em])
1745 1745 return matchmod.intersectmatchers(match, nm)
1746 1746 return matchmod.intersectmatchers(match, self._narrowmatch)
1747 1747 return self._narrowmatch
1748 1748
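# A sketch (hypothetical paths; assumes `repo` is a narrow clone whose
# narrowspec excludes b'docs/') of how includeexact keeps explicit paths:
from mercurial import match as matchmod

m = matchmod.match(repo.root, b'', [b'docs/readme.txt'])
nm = repo.narrowmatch(m, includeexact=True)
# the explicitly named file still matches, so later code can warn about
# it instead of silently dropping it
assert nm(b'docs/readme.txt')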
1749 1749 def setnarrowpats(self, newincludes, newexcludes):
1750 1750 narrowspec.save(self, newincludes, newexcludes)
1751 1751 self.invalidate(clearfilecache=True)
1752 1752
1753 1753 @unfilteredpropertycache
1754 1754 def _quick_access_changeid_null(self):
1755 1755 return {
1756 1756 b'null': (nullrev, nullid),
1757 1757 nullrev: (nullrev, nullid),
1758 1758 nullid: (nullrev, nullid),
1759 1759 }
1760 1760
1761 1761 @unfilteredpropertycache
1762 1762 def _quick_access_changeid_wc(self):
1763 1763 # also fast path access to the working copy parents
1764 1764 # however, only do it for filters that ensure the wc is visible.
1765 1765 quick = self._quick_access_changeid_null.copy()
1766 1766 cl = self.unfiltered().changelog
1767 1767 for node in self.dirstate.parents():
1768 1768 if node == nullid:
1769 1769 continue
1770 1770 rev = cl.index.get_rev(node)
1771 1771 if rev is None:
1772 1772 # unknown working copy parent case:
1773 1773 #
1774 1774 # skip the fast path and let higher code deal with it
1775 1775 continue
1776 1776 pair = (rev, node)
1777 1777 quick[rev] = pair
1778 1778 quick[node] = pair
1779 1779 # also add the parents of the parents
1780 1780 for r in cl.parentrevs(rev):
1781 1781 if r == nullrev:
1782 1782 continue
1783 1783 n = cl.node(r)
1784 1784 pair = (r, n)
1785 1785 quick[r] = pair
1786 1786 quick[n] = pair
1787 1787 p1node = self.dirstate.p1()
1788 1788 if p1node != nullid:
1789 1789 quick[b'.'] = quick[p1node]
1790 1790 return quick
1791 1791
1792 1792 @unfilteredmethod
1793 1793 def _quick_access_changeid_invalidate(self):
1794 1794 if '_quick_access_changeid_wc' in vars(self):
1795 1795 del self.__dict__['_quick_access_changeid_wc']
1796 1796
1797 1797 @property
1798 1798 def _quick_access_changeid(self):
1799 1799 """an helper dictionnary for __getitem__ calls
1800 1800
1801 1801 This contains a list of symbol we can recognise right away without
1802 1802 further processing.
1803 1803 """
1804 1804 if self.filtername in repoview.filter_has_wc:
1805 1805 return self._quick_access_changeid_wc
1806 1806 return self._quick_access_changeid_null
1807 1807
1808 1808 def __getitem__(self, changeid):
1809 1809 # dealing with special cases
1810 1810 if changeid is None:
1811 1811 return context.workingctx(self)
1812 1812 if isinstance(changeid, context.basectx):
1813 1813 return changeid
1814 1814
1815 1815 # dealing with multiple revisions
1816 1816 if isinstance(changeid, slice):
1817 1817 # wdirrev isn't contiguous so the slice shouldn't include it
1818 1818 return [
1819 1819 self[i]
1820 1820 for i in pycompat.xrange(*changeid.indices(len(self)))
1821 1821 if i not in self.changelog.filteredrevs
1822 1822 ]
1823 1823
1824 1824 # dealing with some special values
1825 1825 quick_access = self._quick_access_changeid.get(changeid)
1826 1826 if quick_access is not None:
1827 1827 rev, node = quick_access
1828 1828 return context.changectx(self, rev, node, maybe_filtered=False)
1829 1829 if changeid == b'tip':
1830 1830 node = self.changelog.tip()
1831 1831 rev = self.changelog.rev(node)
1832 1832 return context.changectx(self, rev, node)
1833 1833
1834 1834 # dealing with arbitrary values
1835 1835 try:
1836 1836 if isinstance(changeid, int):
1837 1837 node = self.changelog.node(changeid)
1838 1838 rev = changeid
1839 1839 elif changeid == b'.':
1840 1840 # this is a hack to delay/avoid loading obsmarkers
1841 1841 # when we know that '.' won't be hidden
1842 1842 node = self.dirstate.p1()
1843 1843 rev = self.unfiltered().changelog.rev(node)
1844 1844 elif len(changeid) == 20:
1845 1845 try:
1846 1846 node = changeid
1847 1847 rev = self.changelog.rev(changeid)
1848 1848 except error.FilteredLookupError:
1849 1849 changeid = hex(changeid) # for the error message
1850 1850 raise
1851 1851 except LookupError:
1852 1852 # check if it might have come from damaged dirstate
1853 1853 #
1854 1854 # XXX we could avoid the unfiltered if we had a recognizable
1855 1855 # exception for filtered changeset access
1856 1856 if (
1857 1857 self.local()
1858 1858 and changeid in self.unfiltered().dirstate.parents()
1859 1859 ):
1860 1860 msg = _(b"working directory has unknown parent '%s'!")
1861 1861 raise error.Abort(msg % short(changeid))
1862 1862 changeid = hex(changeid) # for the error message
1863 1863 raise
1864 1864
1865 1865 elif len(changeid) == 40:
1866 1866 node = bin(changeid)
1867 1867 rev = self.changelog.rev(node)
1868 1868 else:
1869 1869 raise error.ProgrammingError(
1870 1870 b"unsupported changeid '%s' of type %s"
1871 1871 % (changeid, pycompat.bytestr(type(changeid)))
1872 1872 )
1873 1873
1874 1874 return context.changectx(self, rev, node)
1875 1875
1876 1876 except (error.FilteredIndexError, error.FilteredLookupError):
1877 1877 raise error.FilteredRepoLookupError(
1878 1878 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1879 1879 )
1880 1880 except (IndexError, LookupError):
1881 1881 raise error.RepoLookupError(
1882 1882 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1883 1883 )
1884 1884 except error.WdirUnsupported:
1885 1885 return context.workingctx(self)
1886 1886
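# Typical lookups through __getitem__, assuming an open `repo`:
ctx = repo[b'tip']   # symbolic name, special-cased above
ctx = repo[0]        # integer revision number
ctx = repo[b'.']     # working-directory parent
wctx = repo[None]    # the working directory itself (workingctx)
# slices skip filtered (e.g. hidden) revisions:
recent = repo[-10:]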
1887 1887 def __contains__(self, changeid):
1888 1888 """True if the given changeid exists"""
1889 1889 try:
1890 1890 self[changeid]
1891 1891 return True
1892 1892 except error.RepoLookupError:
1893 1893 return False
1894 1894
1895 1895 def __nonzero__(self):
1896 1896 return True
1897 1897
1898 1898 __bool__ = __nonzero__
1899 1899
1900 1900 def __len__(self):
1901 1901 # no need to pay the cost of repoview.changelog
1902 1902 unfi = self.unfiltered()
1903 1903 return len(unfi.changelog)
1904 1904
1905 1905 def __iter__(self):
1906 1906 return iter(self.changelog)
1907 1907
1908 1908 def revs(self, expr, *args):
1909 1909 """Find revisions matching a revset.
1910 1910
1911 1911 The revset is specified as a string ``expr`` that may contain
1912 1912 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1913 1913
1914 1914 Revset aliases from the configuration are not expanded. To expand
1915 1915 user aliases, consider calling ``scmutil.revrange()`` or
1916 1916 ``repo.anyrevs([expr], user=True)``.
1917 1917
1918 1918 Returns a smartset.abstractsmartset, which is a list-like interface
1919 1919 that contains integer revisions.
1920 1920 """
1921 1921 tree = revsetlang.spectree(expr, *args)
1922 1922 return revset.makematcher(tree)(self)
1923 1923
1924 1924 def set(self, expr, *args):
1925 1925 """Find revisions matching a revset and emit changectx instances.
1926 1926
1927 1927 This is a convenience wrapper around ``revs()`` that iterates the
1928 1928 result and is a generator of changectx instances.
1929 1929
1930 1930 Revset aliases from the configuration are not expanded. To expand
1931 1931 user aliases, consider calling ``scmutil.revrange()``.
1932 1932 """
1933 1933 for r in self.revs(expr, *args):
1934 1934 yield self[r]
1935 1935
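# A sketch of revs()/set() usage, assuming an open `repo`; the %-escapes
# (here %d for an int and %s for bytes) come from revsetlang.formatspec:
revs = repo.revs(b'ancestors(%d) and user(%s)', 42, b'alice')
for ctx in repo.set(b'heads(ancestors(%d))', 42):
    repo.ui.write(b'%s %s\n' % (ctx.hex(), ctx.branch()))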
1936 1936 def anyrevs(self, specs, user=False, localalias=None):
1937 1937 """Find revisions matching one of the given revsets.
1938 1938
1939 1939 Revset aliases from the configuration are not expanded by default. To
1940 1940 expand user aliases, specify ``user=True``. To provide some local
1941 1941 definitions overriding user aliases, set ``localalias`` to
1942 1942 ``{name: definitionstring}``.
1943 1943 """
1944 1944 if specs == [b'null']:
1945 1945 return revset.baseset([nullrev])
1946 1946 if specs == [b'.']:
1947 1947 quick_data = self._quick_access_changeid.get(b'.')
1948 1948 if quick_data is not None:
1949 1949 return revset.baseset([quick_data[0]])
1950 1950 if user:
1951 1951 m = revset.matchany(
1952 1952 self.ui,
1953 1953 specs,
1954 1954 lookup=revset.lookupfn(self),
1955 1955 localalias=localalias,
1956 1956 )
1957 1957 else:
1958 1958 m = revset.matchany(None, specs, localalias=localalias)
1959 1959 return m(self)
1960 1960
1961 1961 def url(self):
1962 1962 return b'file:' + self.root
1963 1963
1964 1964 def hook(self, name, throw=False, **args):
1965 1965 """Call a hook, passing this repo instance.
1966 1966
1967 1967 This is a convenience method to aid invoking hooks. Extensions likely
1968 1968 won't call this unless they have registered a custom hook or are
1969 1969 replacing code that is expected to call a hook.
1970 1970 """
1971 1971 return hook.hook(self.ui, self, name, throw, **args)
1972 1972
1973 1973 @filteredpropertycache
1974 1974 def _tagscache(self):
1975 1975 """Returns a tagscache object that contains various tags related
1976 1976 caches."""
1977 1977
1978 1978 # This simplifies its cache management by having one decorated
1979 1979 # function (this one) and the rest simply fetch things from it.
1980 1980 class tagscache(object):
1981 1981 def __init__(self):
1982 1982 # These two define the set of tags for this repository. tags
1983 1983 # maps tag name to node; tagtypes maps tag name to 'global' or
1984 1984 # 'local'. (Global tags are defined by .hgtags across all
1985 1985 # heads, and local tags are defined in .hg/localtags.)
1986 1986 # They constitute the in-memory cache of tags.
1987 1987 self.tags = self.tagtypes = None
1988 1988
1989 1989 self.nodetagscache = self.tagslist = None
1990 1990
1991 1991 cache = tagscache()
1992 1992 cache.tags, cache.tagtypes = self._findtags()
1993 1993
1994 1994 return cache
1995 1995
1996 1996 def tags(self):
1997 1997 '''return a mapping of tag to node'''
1998 1998 t = {}
1999 1999 if self.changelog.filteredrevs:
2000 2000 tags, tt = self._findtags()
2001 2001 else:
2002 2002 tags = self._tagscache.tags
2003 2003 rev = self.changelog.rev
2004 2004 for k, v in pycompat.iteritems(tags):
2005 2005 try:
2006 2006 # ignore tags to unknown nodes
2007 2007 rev(v)
2008 2008 t[k] = v
2009 2009 except (error.LookupError, ValueError):
2010 2010 pass
2011 2011 return t
2012 2012
2013 2013 def _findtags(self):
2014 2014 """Do the hard work of finding tags. Return a pair of dicts
2015 2015 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2016 2016 maps tag name to a string like \'global\' or \'local\'.
2017 2017 Subclasses or extensions are free to add their own tags, but
2018 2018 should be aware that the returned dicts will be retained for the
2019 2019 duration of the localrepo object."""
2020 2020
2021 2021 # XXX what tagtype should subclasses/extensions use? Currently
2022 2022 # mq and bookmarks add tags, but do not set the tagtype at all.
2023 2023 # Should each extension invent its own tag type? Should there
2024 2024 # be one tagtype for all such "virtual" tags? Or is the status
2025 2025 # quo fine?
2026 2026
2027 2027 # map tag name to (node, hist)
2028 2028 alltags = tagsmod.findglobaltags(self.ui, self)
2029 2029 # map tag name to tag type
2030 2030 tagtypes = {tag: b'global' for tag in alltags}
2031 2031
2032 2032 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2033 2033
2034 2034 # Build the return dicts. Have to re-encode tag names because
2035 2035 # the tags module always uses UTF-8 (in order not to lose info
2036 2036 # writing to the cache), but the rest of Mercurial wants them in
2037 2037 # local encoding.
2038 2038 tags = {}
2039 2039 for (name, (node, hist)) in pycompat.iteritems(alltags):
2040 2040 if node != nullid:
2041 2041 tags[encoding.tolocal(name)] = node
2042 2042 tags[b'tip'] = self.changelog.tip()
2043 2043 tagtypes = {
2044 2044 encoding.tolocal(name): value
2045 2045 for (name, value) in pycompat.iteritems(tagtypes)
2046 2046 }
2047 2047 return (tags, tagtypes)
2048 2048
2049 2049 def tagtype(self, tagname):
2050 2050 """
2051 2051 return the type of the given tag. result can be:
2052 2052
2053 2053 'local' : a local tag
2054 2054 'global' : a global tag
2055 2055 None : tag does not exist
2056 2056 """
2057 2057
2058 2058 return self._tagscache.tagtypes.get(tagname)
2059 2059
2060 2060 def tagslist(self):
2061 2061 '''return a list of tags ordered by revision'''
2062 2062 if not self._tagscache.tagslist:
2063 2063 l = []
2064 2064 for t, n in pycompat.iteritems(self.tags()):
2065 2065 l.append((self.changelog.rev(n), t, n))
2066 2066 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2067 2067
2068 2068 return self._tagscache.tagslist
2069 2069
2070 2070 def nodetags(self, node):
2071 2071 '''return the tags associated with a node'''
2072 2072 if not self._tagscache.nodetagscache:
2073 2073 nodetagscache = {}
2074 2074 for t, n in pycompat.iteritems(self._tagscache.tags):
2075 2075 nodetagscache.setdefault(n, []).append(t)
2076 2076 for tags in pycompat.itervalues(nodetagscache):
2077 2077 tags.sort()
2078 2078 self._tagscache.nodetagscache = nodetagscache
2079 2079 return self._tagscache.nodetagscache.get(node, [])
2080 2080
2081 2081 def nodebookmarks(self, node):
2082 2082 """return the list of bookmarks pointing to the specified node"""
2083 2083 return self._bookmarks.names(node)
2084 2084
2085 2085 def branchmap(self):
2086 2086 """returns a dictionary {branch: [branchheads]} with branchheads
2087 2087 ordered by increasing revision number"""
2088 2088 return self._branchcaches[self]
2089 2089
2090 2090 @unfilteredmethod
2091 2091 def revbranchcache(self):
2092 2092 if not self._revbranchcache:
2093 2093 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2094 2094 return self._revbranchcache
2095 2095
2096 2096 def register_changeset(self, rev, changelogrevision):
2097 2097 self.revbranchcache().setdata(rev, changelogrevision)
2098 2098
2099 2099 def branchtip(self, branch, ignoremissing=False):
2100 2100 """return the tip node for a given branch
2101 2101
2102 2102 If ignoremissing is True, then this method will not raise an error.
2103 2103 This is helpful for callers that only expect None for a missing branch
2104 2104 (e.g. namespace).
2105 2105
2106 2106 """
2107 2107 try:
2108 2108 return self.branchmap().branchtip(branch)
2109 2109 except KeyError:
2110 2110 if not ignoremissing:
2111 2111 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2112 2112 else:
2113 2113 pass
2114 2114
2115 2115 def lookup(self, key):
2116 2116 node = scmutil.revsymbol(self, key).node()
2117 2117 if node is None:
2118 2118 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2119 2119 return node
2120 2120
2121 2121 def lookupbranch(self, key):
2122 2122 if self.branchmap().hasbranch(key):
2123 2123 return key
2124 2124
2125 2125 return scmutil.revsymbol(self, key).branch()
2126 2126
2127 2127 def known(self, nodes):
2128 2128 cl = self.changelog
2129 2129 get_rev = cl.index.get_rev
2130 2130 filtered = cl.filteredrevs
2131 2131 result = []
2132 2132 for n in nodes:
2133 2133 r = get_rev(n)
2134 2134 resp = not (r is None or r in filtered)
2135 2135 result.append(resp)
2136 2136 return result
2137 2137
2138 2138 def local(self):
2139 2139 return self
2140 2140
2141 2141 def publishing(self):
2142 2142 # it's safe (and desirable) to trust the publish flag unconditionally
2143 2143 # so that we don't finalize changes shared between users via ssh or nfs
2144 2144 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2145 2145
2146 2146 def cancopy(self):
2147 2147 # so statichttprepo's override of local() works
2148 2148 if not self.local():
2149 2149 return False
2150 2150 if not self.publishing():
2151 2151 return True
2152 2152 # if publishing we can't copy if there is filtered content
2153 2153 return not self.filtered(b'visible').changelog.filteredrevs
2154 2154
2155 2155 def shared(self):
2156 2156 '''the type of shared repository (None if not shared)'''
2157 2157 if self.sharedpath != self.path:
2158 2158 return b'store'
2159 2159 return None
2160 2160
2161 2161 def wjoin(self, f, *insidef):
2162 2162 return self.vfs.reljoin(self.root, f, *insidef)
2163 2163
2164 2164 def setparents(self, p1, p2=nullid):
2165 2165 self[None].setparents(p1, p2)
2166 2166 self._quick_access_changeid_invalidate()
2167 2167
2168 2168 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2169 2169 """changeid must be a changeset revision, if specified.
2170 2170 fileid can be a file revision or node."""
2171 2171 return context.filectx(
2172 2172 self, path, changeid, fileid, changectx=changectx
2173 2173 )
2174 2174
2175 2175 def getcwd(self):
2176 2176 return self.dirstate.getcwd()
2177 2177
2178 2178 def pathto(self, f, cwd=None):
2179 2179 return self.dirstate.pathto(f, cwd)
2180 2180
2181 2181 def _loadfilter(self, filter):
2182 2182 if filter not in self._filterpats:
2183 2183 l = []
2184 2184 for pat, cmd in self.ui.configitems(filter):
2185 2185 if cmd == b'!':
2186 2186 continue
2187 2187 mf = matchmod.match(self.root, b'', [pat])
2188 2188 fn = None
2189 2189 params = cmd
2190 2190 for name, filterfn in pycompat.iteritems(self._datafilters):
2191 2191 if cmd.startswith(name):
2192 2192 fn = filterfn
2193 2193 params = cmd[len(name) :].lstrip()
2194 2194 break
2195 2195 if not fn:
2196 2196 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2197 2197 fn.__name__ = 'commandfilter'
2198 2198 # Wrap old filters not supporting keyword arguments
2199 2199 if not pycompat.getargspec(fn)[2]:
2200 2200 oldfn = fn
2201 2201 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2202 2202 fn.__name__ = 'compat-' + oldfn.__name__
2203 2203 l.append((mf, fn, params))
2204 2204 self._filterpats[filter] = l
2205 2205 return self._filterpats[filter]
2206 2206
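# For illustration, an hgrc feeding _loadfilter might look like this
# (hypothetical patterns and commands; a pipe-style filter command
# receives the file data on stdin and writes the transformed data to
# stdout, and `!` disables a pattern):
#
#   [encode]
#   **.txt = dos2unix
#
#   [decode]
#   **.txt = unix2dos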
2207 2207 def _filter(self, filterpats, filename, data):
2208 2208 for mf, fn, cmd in filterpats:
2209 2209 if mf(filename):
2210 2210 self.ui.debug(
2211 2211 b"filtering %s through %s\n"
2212 2212 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2213 2213 )
2214 2214 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2215 2215 break
2216 2216
2217 2217 return data
2218 2218
2219 2219 @unfilteredpropertycache
2220 2220 def _encodefilterpats(self):
2221 2221 return self._loadfilter(b'encode')
2222 2222
2223 2223 @unfilteredpropertycache
2224 2224 def _decodefilterpats(self):
2225 2225 return self._loadfilter(b'decode')
2226 2226
2227 2227 def adddatafilter(self, name, filter):
2228 2228 self._datafilters[name] = filter
2229 2229
2230 2230 def wread(self, filename):
2231 2231 if self.wvfs.islink(filename):
2232 2232 data = self.wvfs.readlink(filename)
2233 2233 else:
2234 2234 data = self.wvfs.read(filename)
2235 2235 return self._filter(self._encodefilterpats, filename, data)
2236 2236
2237 2237 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2238 2238 """write ``data`` into ``filename`` in the working directory
2239 2239
2240 2240 This returns the length of the written (possibly decoded) data.
2241 2241 """
2242 2242 data = self._filter(self._decodefilterpats, filename, data)
2243 2243 if b'l' in flags:
2244 2244 self.wvfs.symlink(data, filename)
2245 2245 else:
2246 2246 self.wvfs.write(
2247 2247 filename, data, backgroundclose=backgroundclose, **kwargs
2248 2248 )
2249 2249 if b'x' in flags:
2250 2250 self.wvfs.setflags(filename, False, True)
2251 2251 else:
2252 2252 self.wvfs.setflags(filename, False, False)
2253 2253 return len(data)
2254 2254
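# A sketch of the flag handling above, assuming an open `repo`:
repo.wwrite(b'run.sh', b'#!/bin/sh\necho hi\n', b'x')  # executable file
repo.wwrite(b'alias', b'run.sh', b'l')  # symlink pointing at run.sh
repo.wwrite(b'notes.txt', b'plain\n', b'')  # regular, non-executable file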
2255 2255 def wwritedata(self, filename, data):
2256 2256 return self._filter(self._decodefilterpats, filename, data)
2257 2257
2258 2258 def currenttransaction(self):
2259 2259 """return the current transaction or None if non exists"""
2260 2260 if self._transref:
2261 2261 tr = self._transref()
2262 2262 else:
2263 2263 tr = None
2264 2264
2265 2265 if tr and tr.running():
2266 2266 return tr
2267 2267 return None
2268 2268
2269 2269 def transaction(self, desc, report=None):
2270 2270 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2271 2271 b'devel', b'check-locks'
2272 2272 ):
2273 2273 if self._currentlock(self._lockref) is None:
2274 2274 raise error.ProgrammingError(b'transaction requires locking')
2275 2275 tr = self.currenttransaction()
2276 2276 if tr is not None:
2277 2277 return tr.nest(name=desc)
2278 2278
2279 2279 # abort here if the journal already exists
2280 2280 if self.svfs.exists(b"journal"):
2281 2281 raise error.RepoError(
2282 2282 _(b"abandoned transaction found"),
2283 2283 hint=_(b"run 'hg recover' to clean up transaction"),
2284 2284 )
2285 2285
2286 2286 idbase = b"%.40f#%f" % (random.random(), time.time())
2287 2287 ha = hex(hashutil.sha1(idbase).digest())
2288 2288 txnid = b'TXN:' + ha
2289 2289 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2290 2290
2291 2291 self._writejournal(desc)
2292 2292 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2293 2293 if report:
2294 2294 rp = report
2295 2295 else:
2296 2296 rp = self.ui.warn
2297 2297 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2298 2298 # we must avoid cyclic reference between repo and transaction.
2299 2299 reporef = weakref.ref(self)
2300 2300 # Code to track tag movement
2301 2301 #
2302 2302 # Since tags are all handled as file content, it is actually quite hard
2303 2303 # to track their movement from a code perspective. So we fall back to
2304 2304 # tracking at the repository level. One could envision tracking changes
2305 2305 # to the '.hgtags' file through changegroup application, but that fails
2306 2306 # to cope with cases where a transaction exposes new heads without a
2307 2307 # changegroup being involved (e.g. phase movement).
2308 2308 #
2309 2309 # For now, we gate the feature behind a flag since it likely comes
2310 2310 # with performance impacts. The current code runs more often than needed
2311 2311 # and does not use caches as much as it could. The current focus is on
2312 2312 # the behavior of the feature, so we disable it by default. The flag
2313 2313 # will be removed when we are happy with the performance impact.
2314 2314 #
2315 2315 # Once this feature is no longer experimental move the following
2316 2316 # documentation to the appropriate help section:
2317 2317 #
2318 2318 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2319 2319 # tags (new or changed or deleted tags). In addition the details of
2320 2320 # these changes are made available in a file at:
2321 2321 # ``REPOROOT/.hg/changes/tags.changes``.
2322 2322 # Make sure you check for HG_TAG_MOVED before reading that file, as it
2323 2323 # might exist from a previous transaction even if no tags were touched
2324 2324 # in this one. Changes are recorded in a line-based format::
2325 2325 #
2326 2326 # <action> <hex-node> <tag-name>\n
2327 2327 #
2328 2328 # Actions are defined as follows:
2329 2329 # "-R": tag is removed,
2330 2330 # "+A": tag is added,
2331 2331 # "-M": tag is moved (old value),
2332 2332 # "+M": tag is moved (new value),
2333 2333 tracktags = lambda x: None
2334 2334 # experimental config: experimental.hook-track-tags
2335 2335 shouldtracktags = self.ui.configbool(
2336 2336 b'experimental', b'hook-track-tags'
2337 2337 )
2338 2338 if desc != b'strip' and shouldtracktags:
2339 2339 oldheads = self.changelog.headrevs()
2340 2340
2341 2341 def tracktags(tr2):
2342 2342 repo = reporef()
2343 2343 assert repo is not None # help pytype
2344 2344 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2345 2345 newheads = repo.changelog.headrevs()
2346 2346 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2347 2347 # note: we compare lists here.
2348 2348 # As we do it only once, building a set would not be cheaper.
2349 2349 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2350 2350 if changes:
2351 2351 tr2.hookargs[b'tag_moved'] = b'1'
2352 2352 with repo.vfs(
2353 2353 b'changes/tags.changes', b'w', atomictemp=True
2354 2354 ) as changesfile:
2355 2355 # note: we do not register the file with the transaction
2356 2356 # because we need it to still exist when the transaction
2357 2357 # is closed (for txnclose hooks)
2358 2358 tagsmod.writediff(changesfile, changes)
2359 2359
2360 2360 def validate(tr2):
2361 2361 """will run pre-closing hooks"""
2362 2362 # XXX the transaction API is a bit lacking here so we take a hacky
2363 2363 # path for now
2364 2364 #
2365 2365 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2366 2366 # dict is copied before these run. In addition, we need the data
2367 2367 # available to in-memory hooks too.
2368 2368 #
2369 2369 # Moreover, we also need to make sure this runs before txnclose
2370 2370 # hooks and there is no "pending" mechanism that would execute
2371 2371 # logic only if hooks are about to run.
2372 2372 #
2373 2373 # Fixing this limitation of the transaction is also needed to track
2374 2374 # other families of changes (bookmarks, phases, obsolescence).
2375 2375 #
2376 2376 # This will have to be fixed before we remove the experimental
2377 2377 # gating.
2378 2378 tracktags(tr2)
2379 2379 repo = reporef()
2380 2380 assert repo is not None # help pytype
2381 2381
2382 2382 singleheadopt = (b'experimental', b'single-head-per-branch')
2383 2383 singlehead = repo.ui.configbool(*singleheadopt)
2384 2384 if singlehead:
2385 2385 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2386 2386 accountclosed = singleheadsub.get(
2387 2387 b"account-closed-heads", False
2388 2388 )
2389 2389 if singleheadsub.get(b"public-changes-only", False):
2390 2390 filtername = b"immutable"
2391 2391 else:
2392 2392 filtername = b"visible"
2393 2393 scmutil.enforcesinglehead(
2394 2394 repo, tr2, desc, accountclosed, filtername
2395 2395 )
2396 2396 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2397 2397 for name, (old, new) in sorted(
2398 2398 tr.changes[b'bookmarks'].items()
2399 2399 ):
2400 2400 args = tr.hookargs.copy()
2401 2401 args.update(bookmarks.preparehookargs(name, old, new))
2402 2402 repo.hook(
2403 2403 b'pretxnclose-bookmark',
2404 2404 throw=True,
2405 2405 **pycompat.strkwargs(args)
2406 2406 )
2407 2407 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2408 2408 cl = repo.unfiltered().changelog
2409 2409 for revs, (old, new) in tr.changes[b'phases']:
2410 2410 for rev in revs:
2411 2411 args = tr.hookargs.copy()
2412 2412 node = hex(cl.node(rev))
2413 2413 args.update(phases.preparehookargs(node, old, new))
2414 2414 repo.hook(
2415 2415 b'pretxnclose-phase',
2416 2416 throw=True,
2417 2417 **pycompat.strkwargs(args)
2418 2418 )
2419 2419
2420 2420 repo.hook(
2421 2421 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2422 2422 )
2423 2423
2424 2424 def releasefn(tr, success):
2425 2425 repo = reporef()
2426 2426 if repo is None:
2427 2427 # If the repo has been GC'd (and this release function is being
2428 2428 # called from transaction.__del__), there's not much we can do,
2429 2429 # so just leave the unfinished transaction there and let the
2430 2430 # user run `hg recover`.
2431 2431 return
2432 2432 if success:
2433 2433 # this should be explicitly invoked here, because
2434 2434 # in-memory changes aren't written out when closing
2435 2435 # the transaction if tr.addfilegenerator (via
2436 2436 # dirstate.write or so) isn't invoked while the
2437 2437 # transaction is running
2438 2438 repo.dirstate.write(None)
2439 2439 else:
2440 2440 # discard all changes (including ones already written
2441 2441 # out) in this transaction
2442 2442 narrowspec.restorebackup(self, b'journal.narrowspec')
2443 2443 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2444 2444 repo.dirstate.restorebackup(None, b'journal.dirstate')
2445 2445
2446 2446 repo.invalidate(clearfilecache=True)
2447 2447
2448 2448 tr = transaction.transaction(
2449 2449 rp,
2450 2450 self.svfs,
2451 2451 vfsmap,
2452 2452 b"journal",
2453 2453 b"undo",
2454 2454 aftertrans(renames),
2455 2455 self.store.createmode,
2456 2456 validator=validate,
2457 2457 releasefn=releasefn,
2458 2458 checkambigfiles=_cachedfiles,
2459 2459 name=desc,
2460 2460 )
2461 2461 tr.changes[b'origrepolen'] = len(self)
2462 2462 tr.changes[b'obsmarkers'] = set()
2463 2463 tr.changes[b'phases'] = []
2464 2464 tr.changes[b'bookmarks'] = {}
2465 2465
2466 2466 tr.hookargs[b'txnid'] = txnid
2467 2467 tr.hookargs[b'txnname'] = desc
2468 2468 tr.hookargs[b'changes'] = tr.changes
2469 2469 # note: writing the fncache only during finalize means that the file is
2470 2470 # outdated when running hooks. As fncache is used for streaming clones,
2471 2471 # this is not expected to break anything that happens during the hooks.
2472 2472 tr.addfinalize(b'flush-fncache', self.store.write)
2473 2473
2474 2474 def txnclosehook(tr2):
2475 2475 """To be run if transaction is successful, will schedule a hook run"""
2476 2476 # Don't reference tr2 in hook() so we don't hold a reference.
2477 2477 # This reduces memory consumption when there are multiple
2478 2478 # transactions per lock. This can likely go away if issue5045
2479 2479 # fixes the function accumulation.
2480 2480 hookargs = tr2.hookargs
2481 2481
2482 2482 def hookfunc(unused_success):
2483 2483 repo = reporef()
2484 2484 assert repo is not None # help pytype
2485 2485
2486 2486 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2487 2487 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2488 2488 for name, (old, new) in bmchanges:
2489 2489 args = tr.hookargs.copy()
2490 2490 args.update(bookmarks.preparehookargs(name, old, new))
2491 2491 repo.hook(
2492 2492 b'txnclose-bookmark',
2493 2493 throw=False,
2494 2494 **pycompat.strkwargs(args)
2495 2495 )
2496 2496
2497 2497 if hook.hashook(repo.ui, b'txnclose-phase'):
2498 2498 cl = repo.unfiltered().changelog
2499 2499 phasemv = sorted(
2500 2500 tr.changes[b'phases'], key=lambda r: r[0][0]
2501 2501 )
2502 2502 for revs, (old, new) in phasemv:
2503 2503 for rev in revs:
2504 2504 args = tr.hookargs.copy()
2505 2505 node = hex(cl.node(rev))
2506 2506 args.update(phases.preparehookargs(node, old, new))
2507 2507 repo.hook(
2508 2508 b'txnclose-phase',
2509 2509 throw=False,
2510 2510 **pycompat.strkwargs(args)
2511 2511 )
2512 2512
2513 2513 repo.hook(
2514 2514 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2515 2515 )
2516 2516
2517 2517 repo = reporef()
2518 2518 assert repo is not None # help pytype
2519 2519 repo._afterlock(hookfunc)
2520 2520
2521 2521 tr.addfinalize(b'txnclose-hook', txnclosehook)
2522 2522 # Include a leading "-" to make it happen before the transaction summary
2523 2523 # reports registered via scmutil.registersummarycallback() whose names
2524 2524 # are 00-txnreport etc. That way, the caches will be warm when the
2525 2525 # callbacks run.
2526 2526 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2527 2527
2528 2528 def txnaborthook(tr2):
2529 2529 """To be run if transaction is aborted"""
2530 2530 repo = reporef()
2531 2531 assert repo is not None # help pytype
2532 2532 repo.hook(
2533 2533 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2534 2534 )
2535 2535
2536 2536 tr.addabort(b'txnabort-hook', txnaborthook)
2537 2537 # avoid eager cache invalidation. in-memory data should be identical
2538 2538 # to stored data if the transaction has no error.
2539 2539 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2540 2540 self._transref = weakref.ref(tr)
2541 2541 scmutil.registersummarycallback(self, tr, desc)
2542 2542 return tr
2543 2543
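# A usage sketch, assuming an open `repo`: transactions require the store
# lock and are context managers, so an error rolls everything back:
with repo.lock():
    with repo.transaction(b'my-change') as tr:
        pass  # write store data here; committed when the block exits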
2544 2544 def _journalfiles(self):
2545 2545 return (
2546 2546 (self.svfs, b'journal'),
2547 2547 (self.svfs, b'journal.narrowspec'),
2548 2548 (self.vfs, b'journal.narrowspec.dirstate'),
2549 2549 (self.vfs, b'journal.dirstate'),
2550 2550 (self.vfs, b'journal.branch'),
2551 2551 (self.vfs, b'journal.desc'),
2552 2552 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2553 2553 (self.svfs, b'journal.phaseroots'),
2554 2554 )
2555 2555
2556 2556 def undofiles(self):
2557 2557 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2558 2558
2559 2559 @unfilteredmethod
2560 2560 def _writejournal(self, desc):
2561 2561 self.dirstate.savebackup(None, b'journal.dirstate')
2562 2562 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2563 2563 narrowspec.savebackup(self, b'journal.narrowspec')
2564 2564 self.vfs.write(
2565 2565 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2566 2566 )
2567 2567 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2568 2568 bookmarksvfs = bookmarks.bookmarksvfs(self)
2569 2569 bookmarksvfs.write(
2570 2570 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2571 2571 )
2572 2572 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2573 2573
2574 2574 def recover(self):
2575 2575 with self.lock():
2576 2576 if self.svfs.exists(b"journal"):
2577 2577 self.ui.status(_(b"rolling back interrupted transaction\n"))
2578 2578 vfsmap = {
2579 2579 b'': self.svfs,
2580 2580 b'plain': self.vfs,
2581 2581 }
2582 2582 transaction.rollback(
2583 2583 self.svfs,
2584 2584 vfsmap,
2585 2585 b"journal",
2586 2586 self.ui.warn,
2587 2587 checkambigfiles=_cachedfiles,
2588 2588 )
2589 2589 self.invalidate()
2590 2590 return True
2591 2591 else:
2592 2592 self.ui.warn(_(b"no interrupted transaction available\n"))
2593 2593 return False
2594 2594
2595 2595 def rollback(self, dryrun=False, force=False):
2596 2596 wlock = lock = dsguard = None
2597 2597 try:
2598 2598 wlock = self.wlock()
2599 2599 lock = self.lock()
2600 2600 if self.svfs.exists(b"undo"):
2601 2601 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2602 2602
2603 2603 return self._rollback(dryrun, force, dsguard)
2604 2604 else:
2605 2605 self.ui.warn(_(b"no rollback information available\n"))
2606 2606 return 1
2607 2607 finally:
2608 2608 release(dsguard, lock, wlock)
2609 2609
2610 2610 @unfilteredmethod # Until we get smarter cache management
2611 2611 def _rollback(self, dryrun, force, dsguard):
2612 2612 ui = self.ui
2613 2613 try:
2614 2614 args = self.vfs.read(b'undo.desc').splitlines()
2615 2615 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2616 2616 if len(args) >= 3:
2617 2617 detail = args[2]
2618 2618 oldtip = oldlen - 1
2619 2619
2620 2620 if detail and ui.verbose:
2621 2621 msg = _(
2622 2622 b'repository tip rolled back to revision %d'
2623 2623 b' (undo %s: %s)\n'
2624 2624 ) % (oldtip, desc, detail)
2625 2625 else:
2626 2626 msg = _(
2627 2627 b'repository tip rolled back to revision %d (undo %s)\n'
2628 2628 ) % (oldtip, desc)
2629 2629 except IOError:
2630 2630 msg = _(b'rolling back unknown transaction\n')
2631 2631 desc = None
2632 2632
2633 2633 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2634 2634 raise error.Abort(
2635 2635 _(
2636 2636 b'rollback of last commit while not checked out '
2637 2637 b'may lose data'
2638 2638 ),
2639 2639 hint=_(b'use -f to force'),
2640 2640 )
2641 2641
2642 2642 ui.status(msg)
2643 2643 if dryrun:
2644 2644 return 0
2645 2645
2646 2646 parents = self.dirstate.parents()
2647 2647 self.destroying()
2648 2648 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2649 2649 transaction.rollback(
2650 2650 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2651 2651 )
2652 2652 bookmarksvfs = bookmarks.bookmarksvfs(self)
2653 2653 if bookmarksvfs.exists(b'undo.bookmarks'):
2654 2654 bookmarksvfs.rename(
2655 2655 b'undo.bookmarks', b'bookmarks', checkambig=True
2656 2656 )
2657 2657 if self.svfs.exists(b'undo.phaseroots'):
2658 2658 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2659 2659 self.invalidate()
2660 2660
2661 2661 has_node = self.changelog.index.has_node
2662 2662 parentgone = any(not has_node(p) for p in parents)
2663 2663 if parentgone:
2664 2664 # prevent dirstateguard from overwriting already restored one
2665 2665 dsguard.close()
2666 2666
2667 2667 narrowspec.restorebackup(self, b'undo.narrowspec')
2668 2668 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2669 2669 self.dirstate.restorebackup(None, b'undo.dirstate')
2670 2670 try:
2671 2671 branch = self.vfs.read(b'undo.branch')
2672 2672 self.dirstate.setbranch(encoding.tolocal(branch))
2673 2673 except IOError:
2674 2674 ui.warn(
2675 2675 _(
2676 2676 b'named branch could not be reset: '
2677 2677 b'current branch is still \'%s\'\n'
2678 2678 )
2679 2679 % self.dirstate.branch()
2680 2680 )
2681 2681
2682 2682 parents = tuple([p.rev() for p in self[None].parents()])
2683 2683 if len(parents) > 1:
2684 2684 ui.status(
2685 2685 _(
2686 2686 b'working directory now based on '
2687 2687 b'revisions %d and %d\n'
2688 2688 )
2689 2689 % parents
2690 2690 )
2691 2691 else:
2692 2692 ui.status(
2693 2693 _(b'working directory now based on revision %d\n') % parents
2694 2694 )
2695 2695 mergestatemod.mergestate.clean(self)
2696 2696
2697 2697 # TODO: if we know which new heads may result from this rollback, pass
2698 2698 # them to destroy(), which will prevent the branchhead cache from being
2699 2699 # invalidated.
2700 2700 self.destroyed()
2701 2701 return 0
2702 2702
2703 2703 def _buildcacheupdater(self, newtransaction):
2704 2704 """called during transaction to build the callback updating cache
2705 2705
2706 2706 Lives on the repository to help extension who might want to augment
2707 2707 this logic. For this purpose, the created transaction is passed to the
2708 2708 method.
2709 2709 """
2710 2710 # we must avoid cyclic reference between repo and transaction.
2711 2711 reporef = weakref.ref(self)
2712 2712
2713 2713 def updater(tr):
2714 2714 repo = reporef()
2715 2715 assert repo is not None # help pytype
2716 2716 repo.updatecaches(tr)
2717 2717
2718 2718 return updater
2719 2719
2720 2720 @unfilteredmethod
2721 2721 def updatecaches(self, tr=None, full=False):
2722 2722 """warm appropriate caches
2723 2723
2724 2724 If this function is called after a transaction has closed, that
2725 2725 transaction will be available as the 'tr' argument. This can be used to
2726 2726 selectively update caches relevant to the changes in that transaction.
2727 2727
2728 2728 If 'full' is set, make sure all caches the function knows about have
2729 2729 up-to-date data, even the ones usually loaded more lazily.
2730
2731 The `full` argument can take a special "post-clone" value. In this case
2732 the cache warming is made after a clone, and some of the slower caches
2733 might be skipped, namely the `.fnodetags` one. This argument is
2734 5.8-specific, as we plan for a cleaner way to deal with this in 5.9.
2730 2735 """
2731 2736 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2732 2737 # During strip, many caches are invalid, but a
2733 2738 # later call to `destroyed` will refresh them.
2734 2739 return
2735 2740
2736 2741 if tr is None or tr.changes[b'origrepolen'] < len(self):
2737 2742 # accessing the 'served' branchmap should refresh all the others,
2738 2743 self.ui.debug(b'updating the branch cache\n')
2739 2744 self.filtered(b'served').branchmap()
2740 2745 self.filtered(b'served.hidden').branchmap()
2741 2746
2742 2747 if full:
2743 2748 unfi = self.unfiltered()
2744 2749
2745 2750 self.changelog.update_caches(transaction=tr)
2746 2751 self.manifestlog.update_caches(transaction=tr)
2747 2752
2748 2753 rbc = unfi.revbranchcache()
2749 2754 for r in unfi.changelog:
2750 2755 rbc.branchinfo(r)
2751 2756 rbc.write()
2752 2757
2753 2758 # ensure the working copy parents are in the manifestfulltextcache
2754 2759 for ctx in self[b'.'].parents():
2755 2760 ctx.manifest() # accessing the manifest is enough
2756 2761
2757 # accessing fnode cache warms the cache
2758 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2762 if full != b"post-clone":
2763 # accessing fnode cache warms the cache
2764 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2759 2765 # accessing tags warms the cache
2760 2766 self.tags()
2761 2767 self.filtered(b'served').tags()
2762 2768
2763 2769 # The `full` arg is documented as updating even the lazily-loaded
2764 2770 # caches immediately, so we're forcing a write to cause these caches
2765 2771 # to be warmed up even if they haven't explicitly been requested
2766 2772 # yet (if they've never been used by hg, they won't ever have been
2767 2773 # written, even if they're a subset of another kind of cache that
2768 2774 # *has* been used).
2769 2775 for filt in repoview.filtertable.keys():
2770 2776 filtered = self.filtered(filt)
2771 2777 filtered.branchmap().write(filtered)
2772 2778
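# A sketch of the two `full` modes, assuming an open `repo`:
repo.updatecaches(full=True)            # warm every cache, tags included
repo.updatecaches(full=b"post-clone")   # skip the slow tags-fnodes warming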
2773 2779 def invalidatecaches(self):
2774 2780
2775 2781 if '_tagscache' in vars(self):
2776 2782 # can't use delattr on proxy
2777 2783 del self.__dict__['_tagscache']
2778 2784
2779 2785 self._branchcaches.clear()
2780 2786 self.invalidatevolatilesets()
2781 2787 self._sparsesignaturecache.clear()
2782 2788
2783 2789 def invalidatevolatilesets(self):
2784 2790 self.filteredrevcache.clear()
2785 2791 obsolete.clearobscaches(self)
2786 2792 self._quick_access_changeid_invalidate()
2787 2793
2788 2794 def invalidatedirstate(self):
2789 2795 """Invalidates the dirstate, causing the next call to dirstate
2790 2796 to check if it was modified since the last time it was read,
2791 2797 rereading it if it has.
2792 2798
2793 2799 This is different from dirstate.invalidate() in that it doesn't always
2794 2800 reread the dirstate. Use dirstate.invalidate() if you want to
2795 2801 explicitly read the dirstate again (i.e. restore it to a previous
2796 2802 known good state)."""
2797 2803 if hasunfilteredcache(self, 'dirstate'):
2798 2804 for k in self.dirstate._filecache:
2799 2805 try:
2800 2806 delattr(self.dirstate, k)
2801 2807 except AttributeError:
2802 2808 pass
2803 2809 delattr(self.unfiltered(), 'dirstate')
2804 2810
2805 2811 def invalidate(self, clearfilecache=False):
2806 2812 """Invalidates both store and non-store parts other than dirstate
2807 2813
2808 2814 If a transaction is running, invalidation of store is omitted,
2809 2815 because discarding in-memory changes might cause inconsistency
2810 2816 (e.g. incomplete fncache causes unintentional failure, but
2811 2817 redundant one doesn't).
2812 2818 """
2813 2819 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2814 2820 for k in list(self._filecache.keys()):
2815 2821 # dirstate is invalidated separately in invalidatedirstate()
2816 2822 if k == b'dirstate':
2817 2823 continue
2818 2824 if (
2819 2825 k == b'changelog'
2820 2826 and self.currenttransaction()
2821 2827 and self.changelog._delayed
2822 2828 ):
2823 2829 # The changelog object may store unwritten revisions. We don't
2824 2830 # want to lose them.
2825 2831 # TODO: Solve the problem instead of working around it.
2826 2832 continue
2827 2833
2828 2834 if clearfilecache:
2829 2835 del self._filecache[k]
2830 2836 try:
2831 2837 delattr(unfiltered, k)
2832 2838 except AttributeError:
2833 2839 pass
2834 2840 self.invalidatecaches()
2835 2841 if not self.currenttransaction():
2836 2842 # TODO: Changing contents of store outside transaction
2837 2843 # causes inconsistency. We should make in-memory store
2838 2844 # changes detectable, and abort if changed.
2839 2845 self.store.invalidatecaches()
2840 2846
2841 2847 def invalidateall(self):
2842 2848 """Fully invalidates both store and non-store parts, causing the
2843 2849 subsequent operation to reread any outside changes."""
2844 2850 # extensions should hook this to invalidate their caches
2845 2851 self.invalidate()
2846 2852 self.invalidatedirstate()
2847 2853
2848 2854 @unfilteredmethod
2849 2855 def _refreshfilecachestats(self, tr):
2850 2856 """Reload stats of cached files so that they are flagged as valid"""
2851 2857 for k, ce in self._filecache.items():
2852 2858 k = pycompat.sysstr(k)
2853 2859 if k == 'dirstate' or k not in self.__dict__:
2854 2860 continue
2855 2861 ce.refresh()
2856 2862
2857 2863 def _lock(
2858 2864 self,
2859 2865 vfs,
2860 2866 lockname,
2861 2867 wait,
2862 2868 releasefn,
2863 2869 acquirefn,
2864 2870 desc,
2865 2871 ):
2866 2872 timeout = 0
2867 2873 warntimeout = 0
2868 2874 if wait:
2869 2875 timeout = self.ui.configint(b"ui", b"timeout")
2870 2876 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2871 2877 # internal config: ui.signal-safe-lock
2872 2878 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2873 2879
2874 2880 l = lockmod.trylock(
2875 2881 self.ui,
2876 2882 vfs,
2877 2883 lockname,
2878 2884 timeout,
2879 2885 warntimeout,
2880 2886 releasefn=releasefn,
2881 2887 acquirefn=acquirefn,
2882 2888 desc=desc,
2883 2889 signalsafe=signalsafe,
2884 2890 )
2885 2891 return l
2886 2892
2887 2893 def _afterlock(self, callback):
2888 2894 """add a callback to be run when the repository is fully unlocked
2889 2895
2890 2896 The callback will be executed when the outermost lock is released
2891 2897 (with wlock being higher level than 'lock')."""
2892 2898 for ref in (self._wlockref, self._lockref):
2893 2899 l = ref and ref()
2894 2900 if l and l.held:
2895 2901 l.postrelease.append(callback)
2896 2902 break
2897 2903 else: # no lock has been found.
2898 2904 callback(True)
2899 2905
2900 2906 def lock(self, wait=True):
2901 2907 """Lock the repository store (.hg/store) and return a weak reference
2902 2908 to the lock. Use this before modifying the store (e.g. committing or
2903 2909 stripping). If you are opening a transaction, get a lock as well.
2904 2910
2905 2911 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2906 2912 'wlock' first to avoid a dead-lock hazard."""
2907 2913 l = self._currentlock(self._lockref)
2908 2914 if l is not None:
2909 2915 l.lock()
2910 2916 return l
2911 2917
2912 2918 l = self._lock(
2913 2919 vfs=self.svfs,
2914 2920 lockname=b"lock",
2915 2921 wait=wait,
2916 2922 releasefn=None,
2917 2923 acquirefn=self.invalidate,
2918 2924 desc=_(b'repository %s') % self.origroot,
2919 2925 )
2920 2926 self._lockref = weakref.ref(l)
2921 2927 return l
2922 2928
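# The documented ordering, assuming an open `repo`: take wlock strictly
# before lock whenever both are needed (both are context managers):
with repo.wlock(), repo.lock():
    pass  # safe to modify both the working copy and the store here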
2923 2929 def wlock(self, wait=True):
2924 2930 """Lock the non-store parts of the repository (everything under
2925 2931 .hg except .hg/store) and return a weak reference to the lock.
2926 2932
2927 2933 Use this before modifying files in .hg.
2928 2934
2929 2935 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2930 2936 'wlock' first to avoid a dead-lock hazard."""
2931 2937 l = self._wlockref() if self._wlockref else None
2932 2938 if l is not None and l.held:
2933 2939 l.lock()
2934 2940 return l
2935 2941
2936 2942 # We do not need to check for non-waiting lock acquisition. Such
2937 2943 # an acquisition would not cause a dead-lock, as it would just fail.
2938 2944 if wait and (
2939 2945 self.ui.configbool(b'devel', b'all-warnings')
2940 2946 or self.ui.configbool(b'devel', b'check-locks')
2941 2947 ):
2942 2948 if self._currentlock(self._lockref) is not None:
2943 2949 self.ui.develwarn(b'"wlock" acquired after "lock"')
2944 2950
2945 2951 def unlock():
2946 2952 if self.dirstate.pendingparentchange():
2947 2953 self.dirstate.invalidate()
2948 2954 else:
2949 2955 self.dirstate.write(None)
2950 2956
2951 2957 self._filecache[b'dirstate'].refresh()
2952 2958
2953 2959 l = self._lock(
2954 2960 self.vfs,
2955 2961 b"wlock",
2956 2962 wait,
2957 2963 unlock,
2958 2964 self.invalidatedirstate,
2959 2965 _(b'working directory of %s') % self.origroot,
2960 2966 )
2961 2967 self._wlockref = weakref.ref(l)
2962 2968 return l
2963 2969
2964 2970 def _currentlock(self, lockref):
2965 2971 """Returns the lock if it's held, or None if it's not."""
2966 2972 if lockref is None:
2967 2973 return None
2968 2974 l = lockref()
2969 2975 if l is None or not l.held:
2970 2976 return None
2971 2977 return l
2972 2978
2973 2979 def currentwlock(self):
2974 2980 """Returns the wlock if it's held, or None if it's not."""
2975 2981 return self._currentlock(self._wlockref)
2976 2982
2977 2983 def checkcommitpatterns(self, wctx, match, status, fail):
2978 2984 """check for commit arguments that aren't committable"""
2979 2985 if match.isexact() or match.prefix():
2980 2986 matched = set(status.modified + status.added + status.removed)
2981 2987
2982 2988 for f in match.files():
2983 2989 f = self.dirstate.normalize(f)
2984 2990 if f == b'.' or f in matched or f in wctx.substate:
2985 2991 continue
2986 2992 if f in status.deleted:
2987 2993 fail(f, _(b'file not found!'))
2988 2994 # Is it a directory that exists or used to exist?
2989 2995 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2990 2996 d = f + b'/'
2991 2997 for mf in matched:
2992 2998 if mf.startswith(d):
2993 2999 break
2994 3000 else:
2995 3001 fail(f, _(b"no match under directory!"))
2996 3002 elif f not in self.dirstate:
2997 3003 fail(f, _(b"file not tracked!"))
2998 3004
2999 3005 @unfilteredmethod
3000 3006 def commit(
3001 3007 self,
3002 3008 text=b"",
3003 3009 user=None,
3004 3010 date=None,
3005 3011 match=None,
3006 3012 force=False,
3007 3013 editor=None,
3008 3014 extra=None,
3009 3015 ):
3010 3016 """Add a new revision to current repository.
3011 3017
3012 3018 Revision information is gathered from the working directory,
3013 3019 match can be used to filter the committed files. If editor is
3014 3020 supplied, it is called to get a commit message.
3015 3021 """
3016 3022 if extra is None:
3017 3023 extra = {}
3018 3024
3019 3025 def fail(f, msg):
3020 3026 raise error.InputError(b'%s: %s' % (f, msg))
3021 3027
3022 3028 if not match:
3023 3029 match = matchmod.always()
3024 3030
3025 3031 if not force:
3026 3032 match.bad = fail
3027 3033
3028 3034 # lock() for recent changelog (see issue4368)
3029 3035 with self.wlock(), self.lock():
3030 3036 wctx = self[None]
3031 3037 merge = len(wctx.parents()) > 1
3032 3038
3033 3039 if not force and merge and not match.always():
3034 3040 raise error.Abort(
3035 3041 _(
3036 3042 b'cannot partially commit a merge '
3037 3043 b'(do not specify files or patterns)'
3038 3044 )
3039 3045 )
3040 3046
3041 3047 status = self.status(match=match, clean=force)
3042 3048 if force:
3043 3049 status.modified.extend(
3044 3050 status.clean
3045 3051 ) # mq may commit clean files
3046 3052
3047 3053 # check subrepos
3048 3054 subs, commitsubs, newstate = subrepoutil.precommit(
3049 3055 self.ui, wctx, status, match, force=force
3050 3056 )
3051 3057
3052 3058 # make sure all explicit patterns are matched
3053 3059 if not force:
3054 3060 self.checkcommitpatterns(wctx, match, status, fail)
3055 3061
3056 3062 cctx = context.workingcommitctx(
3057 3063 self, status, text, user, date, extra
3058 3064 )
3059 3065
3060 3066 ms = mergestatemod.mergestate.read(self)
3061 3067 mergeutil.checkunresolved(ms)
3062 3068
3063 3069 # internal config: ui.allowemptycommit
3064 3070 if cctx.isempty() and not self.ui.configbool(
3065 3071 b'ui', b'allowemptycommit'
3066 3072 ):
3067 3073 self.ui.debug(b'nothing to commit, clearing merge state\n')
3068 3074 ms.reset()
3069 3075 return None
3070 3076
3071 3077 if merge and cctx.deleted():
3072 3078 raise error.Abort(_(b"cannot commit merge with missing files"))
3073 3079
3074 3080 if editor:
3075 3081 cctx._text = editor(self, cctx, subs)
3076 3082 edited = text != cctx._text
3077 3083
3078 3084 # Save commit message in case this transaction gets rolled back
3079 3085 # (e.g. by a pretxncommit hook). Leave the content alone on
3080 3086 # the assumption that the user will use the same editor again.
3081 3087 msgfn = self.savecommitmessage(cctx._text)
3082 3088
3083 3089 # commit subs and write new state
3084 3090 if subs:
3085 3091 uipathfn = scmutil.getuipathfn(self)
3086 3092 for s in sorted(commitsubs):
3087 3093 sub = wctx.sub(s)
3088 3094 self.ui.status(
3089 3095 _(b'committing subrepository %s\n')
3090 3096 % uipathfn(subrepoutil.subrelpath(sub))
3091 3097 )
3092 3098 sr = sub.commit(cctx._text, user, date)
3093 3099 newstate[s] = (newstate[s][0], sr)
3094 3100 subrepoutil.writestate(self, newstate)
3095 3101
3096 3102 p1, p2 = self.dirstate.parents()
3097 3103 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3098 3104 try:
3099 3105 self.hook(
3100 3106 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3101 3107 )
3102 3108 with self.transaction(b'commit'):
3103 3109 ret = self.commitctx(cctx, True)
3104 3110 # update bookmarks, dirstate and mergestate
3105 3111 bookmarks.update(self, [p1, p2], ret)
3106 3112 cctx.markcommitted(ret)
3107 3113 ms.reset()
3108 3114 except: # re-raises
3109 3115 if edited:
3110 3116 self.ui.write(
3111 3117 _(b'note: commit message saved in %s\n') % msgfn
3112 3118 )
3113 3119 self.ui.write(
3114 3120 _(
3115 3121 b"note: use 'hg commit --logfile "
3116 3122 b".hg/last-message.txt --edit' to reuse it\n"
3117 3123 )
3118 3124 )
3119 3125 raise
3120 3126
3121 3127 def commithook(unused_success):
3122 3128 # hack for commands that use a temporary commit (e.g. histedit):
3123 3129 # the temporary commit may have been stripped before the hook runs
3124 3130 if self.changelog.hasnode(ret):
3125 3131 self.hook(
3126 3132 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3127 3133 )
3128 3134
3129 3135 self._afterlock(commithook)
3130 3136 return ret
3131 3137
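# A minimal commit() sketch, assuming `repo` has pending changes
# (hypothetical message and user):
node = repo.commit(
    text=b'fix frobnication',
    user=b'An Author <author@example.com>',
)
if node is None:
    repo.ui.status(b'nothing changed\n')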
3132 3138 @unfilteredmethod
3133 3139 def commitctx(self, ctx, error=False, origctx=None):
3134 3140 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3135 3141
3136 3142 @unfilteredmethod
3137 3143 def destroying(self):
3138 3144 """Inform the repository that nodes are about to be destroyed.
3139 3145 Intended for use by strip and rollback, so there's a common
3140 3146 place for anything that has to be done before destroying history.
3141 3147
3142 3148 This is mostly useful for saving state that is in memory and waiting
3143 3149 to be flushed when the current lock is released. Because a call to
3144 3150 destroyed is imminent, the repo will be invalidated, causing those
3145 3151 changes either to stay in memory (waiting for the next unlock) or to
3146 3152 vanish completely.
3147 3153 """
3148 3154 # When using the same lock to commit and strip, the phasecache is left
3149 3155 # dirty after committing. Then when we strip, the repo is invalidated,
3150 3156 # causing those changes to disappear.
3151 3157 if '_phasecache' in vars(self):
3152 3158 self._phasecache.write()
3153 3159
3154 3160 @unfilteredmethod
3155 3161 def destroyed(self):
3156 3162 """Inform the repository that nodes have been destroyed.
3157 3163 Intended for use by strip and rollback, so there's a common
3158 3164 place for anything that has to be done after destroying history.
3159 3165 """
3160 3166 # When one tries to:
3161 3167 # 1) destroy nodes thus calling this method (e.g. strip)
3162 3168 # 2) use phasecache somewhere (e.g. commit)
3163 3169 #
3164 3170 # then 2) will fail because the phasecache contains nodes that were
3165 3171 # removed. We can either remove phasecache from the filecache,
3166 3172 # causing it to reload next time it is accessed, or simply filter
3167 3173 # the removed nodes now and write the updated cache.
3168 3174 self._phasecache.filterunknown(self)
3169 3175 self._phasecache.write()
3170 3176
3171 3177 # refresh all repository caches
3172 3178 self.updatecaches()
3173 3179
3174 3180 # Ensure the persistent tag cache is updated. Doing it now
3175 3181 # means that the tag cache only has to worry about destroyed
3176 3182 # heads immediately after a strip/rollback. That in turn
3177 3183 # guarantees that "cachetip == currenttip" (comparing both rev
3178 3184 # and node) always means no nodes have been added or destroyed.
3179 3185
3180 3186 # XXX this is suboptimal when qrefresh'ing: we strip the current
3181 3187 # head, refresh the tag cache, then immediately add a new head.
3182 3188 # But I think doing it this way is necessary for the "instant
3183 3189 # tag cache retrieval" case to work.
3184 3190 self.invalidate()
3185 3191
3186 3192 def status(
3187 3193 self,
3188 3194 node1=b'.',
3189 3195 node2=None,
3190 3196 match=None,
3191 3197 ignored=False,
3192 3198 clean=False,
3193 3199 unknown=False,
3194 3200 listsubrepos=False,
3195 3201 ):
3196 3202 '''a convenience method that calls node1.status(node2)'''
3197 3203 return self[node1].status(
3198 3204 node2, match, ignored, clean, unknown, listsubrepos
3199 3205 )
3200 3206
3201 3207 def addpostdsstatus(self, ps):
3202 3208 """Add a callback to run within the wlock, at the point at which status
3203 3209 fixups happen.
3204 3210
3205 3211 On status completion, callback(wctx, status) will be called with the
3206 3212 wlock held, unless the dirstate has changed from underneath or the wlock
3207 3213 couldn't be grabbed.
3208 3214
3209 3215 Callbacks should not capture and use a cached copy of the dirstate --
3210 3216 it might change in the meantime. Instead, they should access the
3211 3217 dirstate via wctx.repo().dirstate.
3212 3218
3213 3219 This list is emptied out after each status run -- extensions should
3214 3220 make sure they add to this list each time dirstate.status is called.
3215 3221 Extensions should also make sure they don't call this for statuses
3216 3222 that don't involve the dirstate.
3217 3223 """
3218 3224
3219 3225 # The list is located here for uniqueness reasons -- it is actually
3220 3226 # managed by the workingctx, but that isn't unique per-repo.
3221 3227 self._postdsstatus.append(ps)
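# A minimal extension-side sketch of this hook point; the callback
# name and body are illustrative, only the registration API comes
# from this class:
#
#     def _poststatus(wctx, status):
#         # runs under wlock after status fixups; reach the dirstate
#         # through wctx.repo().dirstate, never through a cached copy
#         wctx.repo().ui.debug(
#             b'%d files modified\n' % len(status.modified)
#         )
#
#     repo.addpostdsstatus(_poststatus)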
3222 3228
3223 3229 def postdsstatus(self):
3224 3230 """Used by workingctx to get the list of post-dirstate-status hooks."""
3225 3231 return self._postdsstatus
3226 3232
3227 3233 def clearpostdsstatus(self):
3228 3234 """Used by workingctx to clear post-dirstate-status hooks."""
3229 3235 del self._postdsstatus[:]
3230 3236
3231 3237 def heads(self, start=None):
3232 3238 if start is None:
3233 3239 cl = self.changelog
3234 3240 headrevs = reversed(cl.headrevs())
3235 3241 return [cl.node(rev) for rev in headrevs]
3236 3242
3237 3243 heads = self.changelog.heads(start)
3238 3244 # sort the output in rev descending order
3239 3245 return sorted(heads, key=self.changelog.rev, reverse=True)
3240 3246
3241 3247 def branchheads(self, branch=None, start=None, closed=False):
3242 3248 """return a (possibly filtered) list of heads for the given branch
3243 3249
3244 3250 Heads are returned in topological order, from newest to oldest.
3245 3251 If branch is None, use the dirstate branch.
3246 3252 If start is not None, return only heads reachable from start.
3247 3253 If closed is True, return heads that are marked as closed as well.
3248 3254 """
3249 3255 if branch is None:
3250 3256 branch = self[None].branch()
3251 3257 branches = self.branchmap()
3252 3258 if not branches.hasbranch(branch):
3253 3259 return []
3254 3260 # the cache returns heads ordered lowest to highest
3255 3261 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3256 3262 if start is not None:
3257 3263 # filter out the heads that cannot be reached from startrev
3258 3264 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3259 3265 bheads = [h for h in bheads if h in fbheads]
3260 3266 return bheads
3261 3267
3262 3268 def branches(self, nodes):
3263 3269 if not nodes:
3264 3270 nodes = [self.changelog.tip()]
3265 3271 b = []
3266 3272 for n in nodes:
3267 3273 t = n
3268 3274 while True:
3269 3275 p = self.changelog.parents(n)
3270 3276 if p[1] != nullid or p[0] == nullid:
3271 3277 b.append((t, n, p[0], p[1]))
3272 3278 break
3273 3279 n = p[0]
3274 3280 return b
3275 3281
3276 3282 def between(self, pairs):
3277 3283 r = []
3278 3284
3279 3285 for top, bottom in pairs:
3280 3286 n, l, i = top, [], 0
3281 3287 f = 1
3282 3288
3283 3289 while n != bottom and n != nullid:
3284 3290 p = self.changelog.parents(n)[0]
3285 3291 if i == f:
3286 3292 l.append(n)
3287 3293 f = f * 2
3288 3294 n = p
3289 3295 i += 1
3290 3296
3291 3297 r.append(l)
3292 3298
3293 3299 return r
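# Reading aid for the loop above: for each (top, bottom) pair it walks
# the first-parent chain from top and records the nodes at distances
# 1, 2, 4, 8, ... until bottom (or nullid) is reached. For a linear
# history top -> p1 -> p2 -> p3 -> p4 -> ..., the resulting list is
# [p1, p2, p4, p8, ...], an exponentially thinning sample of the chain.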
3294 3300
3295 3301 def checkpush(self, pushop):
3296 3302 """Extensions can override this function if additional checks have
3297 3303 to be performed before pushing, or call it if they override the push
3298 3304 command.
3299 3305 """
3300 3306
3301 3307 @unfilteredpropertycache
3302 3308 def prepushoutgoinghooks(self):
3303 3309 """Return util.hooks consists of a pushop with repo, remote, outgoing
3304 3310 methods, which are called before pushing changesets.
3305 3311 """
3306 3312 return util.hooks()
3307 3313
3308 3314 def pushkey(self, namespace, key, old, new):
3309 3315 try:
3310 3316 tr = self.currenttransaction()
3311 3317 hookargs = {}
3312 3318 if tr is not None:
3313 3319 hookargs.update(tr.hookargs)
3314 3320 hookargs = pycompat.strkwargs(hookargs)
3315 3321 hookargs['namespace'] = namespace
3316 3322 hookargs['key'] = key
3317 3323 hookargs['old'] = old
3318 3324 hookargs['new'] = new
3319 3325 self.hook(b'prepushkey', throw=True, **hookargs)
3320 3326 except error.HookAbort as exc:
3321 3327 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3322 3328 if exc.hint:
3323 3329 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3324 3330 return False
3325 3331 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3326 3332 ret = pushkey.push(self, namespace, key, old, new)
3327 3333
3328 3334 def runhook(unused_success):
3329 3335 self.hook(
3330 3336 b'pushkey',
3331 3337 namespace=namespace,
3332 3338 key=key,
3333 3339 old=old,
3334 3340 new=new,
3335 3341 ret=ret,
3336 3342 )
3337 3343
3338 3344 self._afterlock(runhook)
3339 3345 return ret
3340 3346
3341 3347 def listkeys(self, namespace):
3342 3348 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3343 3349 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3344 3350 values = pushkey.list(self, namespace)
3345 3351 self.hook(b'listkeys', namespace=namespace, values=values)
3346 3352 return values
3347 3353
3348 3354 def debugwireargs(self, one, two, three=None, four=None, five=None):
3349 3355 '''used to test argument passing over the wire'''
3350 3356 return b"%s %s %s %s %s" % (
3351 3357 one,
3352 3358 two,
3353 3359 pycompat.bytestr(three),
3354 3360 pycompat.bytestr(four),
3355 3361 pycompat.bytestr(five),
3356 3362 )
3357 3363
3358 3364 def savecommitmessage(self, text):
3359 3365 fp = self.vfs(b'last-message.txt', b'wb')
3360 3366 try:
3361 3367 fp.write(text)
3362 3368 finally:
3363 3369 fp.close()
3364 3370 return self.pathto(fp.name[len(self.root) + 1 :])
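# Recovery sketch enabled by this method (it mirrors the hint printed
# in commit() above): after a rolled-back commit, the message saved in
# .hg/last-message.txt can be reused with
#
#     hg commit --logfile .hg/last-message.txt --edit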
3365 3371
3366 3372 def register_wanted_sidedata(self, category):
3367 3373 self._wanted_sidedata.add(pycompat.bytestr(category))
3368 3374
3369 3375 def register_sidedata_computer(self, kind, category, keys, computer):
3370 3376 if kind not in (b"changelog", b"manifest", b"filelog"):
3371 3377 msg = _(b"unexpected revlog kind '%s'.")
3372 3378 raise error.ProgrammingError(msg % kind)
3373 3379 category = pycompat.bytestr(category)
3374 3380 if category in self._sidedata_computers.get(kind, []):
3375 3381 msg = _(
3376 3382 b"cannot register a sidedata computer twice for category '%s'."
3377 3383 )
3378 3384 raise error.ProgrammingError(msg % category)
3379 3385 self._sidedata_computers.setdefault(kind, {})
3380 3386 self._sidedata_computers[kind][category] = (keys, computer)
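# A hedged sketch of how an extension might drive the two registration
# methods above; the b'exp-example' category, the SD_EXAMPLE key and
# the computer body are illustrative, and the exact computer signature
# is dictated by the revlog code rather than by this method:
#
#     def compute_example(repo, revlog, rev, existing_sidedata):
#         return {SD_EXAMPLE: b'...computed payload...'}
#
#     repo.register_wanted_sidedata(b'exp-example')
#     repo.register_sidedata_computer(
#         b'changelog', b'exp-example', (SD_EXAMPLE,), compute_example
#     )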
3381 3387
3382 3388
3383 3389 # used to avoid circular references so destructors work
3384 3390 def aftertrans(files):
3385 3391 renamefiles = [tuple(t) for t in files]
3386 3392
3387 3393 def a():
3388 3394 for vfs, src, dest in renamefiles:
3389 3395 # if src and dest refer to a same file, vfs.rename is a no-op,
3390 3396 # leaving both src and dest on disk. delete dest to make sure
3391 3397 # the rename couldn't be such a no-op.
3392 3398 vfs.tryunlink(dest)
3393 3399 try:
3394 3400 vfs.rename(src, dest)
3395 3401 except OSError: # journal file does not yet exist
3396 3402 pass
3397 3403
3398 3404 return a
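# (Shape of `files`, inferred from the journal/undo naming used by the
# callers: an iterable of (vfs, src, dest) triples such as
# (vfs, b'journal.dirstate', b'undo.dirstate').)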
3399 3405
3400 3406
3401 3407 def undoname(fn):
3402 3408 base, name = os.path.split(fn)
3403 3409 assert name.startswith(b'journal')
3404 3410 return os.path.join(base, name.replace(b'journal', b'undo', 1))
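# Example, following directly from the replace() call above:
#
#     undoname(b'/repo/.hg/store/journal.phaseroots')
#     -> b'/repo/.hg/store/undo.phaseroots'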
3405 3411
3406 3412
3407 3413 def instance(ui, path, create, intents=None, createopts=None):
3408 3414 localpath = urlutil.urllocalpath(path)
3409 3415 if create:
3410 3416 createrepository(ui, localpath, createopts=createopts)
3411 3417
3412 3418 return makelocalrepository(ui, localpath, intents=intents)
3413 3419
3414 3420
3415 3421 def islocal(path):
3416 3422 return True
3417 3423
3418 3424
3419 3425 def defaultcreateopts(ui, createopts=None):
3420 3426 """Populate the default creation options for a repository.
3421 3427
3422 3428 A dictionary of explicitly requested creation options can be passed
3423 3429 in. Missing keys will be populated.
3424 3430 """
3425 3431 createopts = dict(createopts or {})
3426 3432
3427 3433 if b'backend' not in createopts:
3428 3434 # experimental config: storage.new-repo-backend
3429 3435 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3430 3436
3431 3437 return createopts
3432 3438
3433 3439
3434 3440 def newreporequirements(ui, createopts):
3435 3441 """Determine the set of requirements for a new local repository.
3436 3442
3437 3443 Extensions can wrap this function to specify custom requirements for
3438 3444 new repositories.
3439 3445 """
3440 3446 # If the repo is being created from a shared repository, we copy
3441 3447 # its requirements.
3442 3448 if b'sharedrepo' in createopts:
3443 3449 requirements = set(createopts[b'sharedrepo'].requirements)
3444 3450 if createopts.get(b'sharedrelative'):
3445 3451 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3446 3452 else:
3447 3453 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3448 3454
3449 3455 return requirements
3450 3456
3451 3457 if b'backend' not in createopts:
3452 3458 raise error.ProgrammingError(
3453 3459 b'backend key not present in createopts; '
3454 3460 b'was defaultcreateopts() called?'
3455 3461 )
3456 3462
3457 3463 if createopts[b'backend'] != b'revlogv1':
3458 3464 raise error.Abort(
3459 3465 _(
3460 3466 b'unable to determine repository requirements for '
3461 3467 b'storage backend: %s'
3462 3468 )
3463 3469 % createopts[b'backend']
3464 3470 )
3465 3471
3466 3472 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3467 3473 if ui.configbool(b'format', b'usestore'):
3468 3474 requirements.add(requirementsmod.STORE_REQUIREMENT)
3469 3475 if ui.configbool(b'format', b'usefncache'):
3470 3476 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3471 3477 if ui.configbool(b'format', b'dotencode'):
3472 3478 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3473 3479
3474 3480 compengines = ui.configlist(b'format', b'revlog-compression')
3475 3481 for compengine in compengines:
3476 3482 if compengine in util.compengines:
3477 3483 engine = util.compengines[compengine]
3478 3484 if engine.available() and engine.revlogheader():
3479 3485 break
3480 3486 else:
3481 3487 raise error.Abort(
3482 3488 _(
3483 3489 b'compression engines %s defined by '
3484 3490 b'format.revlog-compression not available'
3485 3491 )
3486 3492 % b', '.join(b'"%s"' % e for e in compengines),
3487 3493 hint=_(
3488 3494 b'run "hg debuginstall" to list available '
3489 3495 b'compression engines'
3490 3496 ),
3491 3497 )
3492 3498
3493 3499 # zlib is the historical default and doesn't need an explicit requirement.
3494 3500 if compengine == b'zstd':
3495 3501 requirements.add(b'revlog-compression-zstd')
3496 3502 elif compengine != b'zlib':
3497 3503 requirements.add(b'exp-compression-%s' % compengine)
3498 3504
3499 3505 if scmutil.gdinitconfig(ui):
3500 3506 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3501 3507 if ui.configbool(b'format', b'sparse-revlog'):
3502 3508 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3503 3509
3504 3510 # experimental config: format.exp-use-side-data
3505 3511 if ui.configbool(b'format', b'exp-use-side-data'):
3506 3512 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3507 3513 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3508 3514 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3509 3515 # experimental config: format.exp-use-copies-side-data-changeset
3510 3516 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3511 3517 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3512 3518 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3513 3519 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3514 3520 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3515 3521 if ui.configbool(b'experimental', b'treemanifest'):
3516 3522 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3517 3523
3518 3524 revlogv2 = ui.config(b'experimental', b'revlogv2')
3519 3525 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3520 3526 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3521 3527 # generaldelta is implied by revlogv2.
3522 3528 requirements.discard(requirementsmod.GENERALDELTA_REQUIREMENT)
3523 3529 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3524 3530 # experimental config: format.internal-phase
3525 3531 if ui.configbool(b'format', b'internal-phase'):
3526 3532 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3527 3533
3528 3534 if createopts.get(b'narrowfiles'):
3529 3535 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3530 3536
3531 3537 if createopts.get(b'lfs'):
3532 3538 requirements.add(b'lfs')
3533 3539
3534 3540 if ui.configbool(b'format', b'bookmarks-in-store'):
3535 3541 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3536 3542
3537 3543 if ui.configbool(b'format', b'use-persistent-nodemap'):
3538 3544 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3539 3545
3540 3546 # if share-safe is enabled, let's create the new repository with the new
3541 3547 # requirement
3542 3548 if ui.configbool(b'format', b'use-share-safe'):
3543 3549 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3544 3550
3545 3551 return requirements
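# Illustrative outcome, consistent with the requirements string visible
# in the stream-clone test output further below: under a default config
# a fresh repository ends up with roughly
#
#     dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
#
# plus revlog-compression-zstd when the zstd engine is available.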
3546 3552
3547 3553
3548 3554 def checkrequirementscompat(ui, requirements):
3549 3555 """Checks compatibility of repository requirements enabled and disabled.
3550 3556
3551 3557 Returns the set of requirements that need to be dropped because the
3552 3558 requirements they depend on are not enabled. Also warns users about it."""
3553 3559
3554 3560 dropped = set()
3555 3561
3556 3562 if requirementsmod.STORE_REQUIREMENT not in requirements:
3557 3563 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3558 3564 ui.warn(
3559 3565 _(
3560 3566 b'ignoring enabled \'format.bookmarks-in-store\' config '
3561 3567 b'because it is incompatible with disabled '
3562 3568 b'\'format.usestore\' config\n'
3563 3569 )
3564 3570 )
3565 3571 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3566 3572
3567 3573 if (
3568 3574 requirementsmod.SHARED_REQUIREMENT in requirements
3569 3575 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3570 3576 ):
3571 3577 raise error.Abort(
3572 3578 _(
3573 3579 b"cannot create shared repository as source was created"
3574 3580 b" with 'format.usestore' config disabled"
3575 3581 )
3576 3582 )
3577 3583
3578 3584 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3579 3585 ui.warn(
3580 3586 _(
3581 3587 b"ignoring enabled 'format.use-share-safe' config because "
3582 3588 b"it is incompatible with disabled 'format.usestore'"
3583 3589 b" config\n"
3584 3590 )
3585 3591 )
3586 3592 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3587 3593
3588 3594 return dropped
3589 3595
3590 3596
3591 3597 def filterknowncreateopts(ui, createopts):
3592 3598 """Filters a dict of repo creation options against options that are known.
3593 3599
3594 3600 Receives a dict of repo creation options and returns a dict of those
3595 3601 options that we don't know how to handle.
3596 3602
3597 3603 This function is called as part of repository creation. If the
3598 3604 returned dict contains any items, repository creation will not
3599 3605 be allowed, as it means there was a request to create a repository
3600 3606 with options not recognized by loaded code.
3601 3607
3602 3608 Extensions can wrap this function to filter out creation options
3603 3609 they know how to handle.
3604 3610 """
3605 3611 known = {
3606 3612 b'backend',
3607 3613 b'lfs',
3608 3614 b'narrowfiles',
3609 3615 b'sharedrepo',
3610 3616 b'sharedrelative',
3611 3617 b'shareditems',
3612 3618 b'shallowfilestore',
3613 3619 }
3614 3620
3615 3621 return {k: v for k, v in createopts.items() if k not in known}
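# Sketch of the wrapping pattern the docstring describes; the b'myopt'
# option name is hypothetical and the import assumes the standard
# module layout:
#
#     from mercurial import extensions, localrepo
#
#     def _filterknown(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop(b'myopt', None)  # option this extension handles
#         return unknown
#
#     extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filterknown)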
3616 3622
3617 3623
3618 3624 def createrepository(ui, path, createopts=None):
3619 3625 """Create a new repository in a vfs.
3620 3626
3621 3627 ``path`` path to the new repo's working directory.
3622 3628 ``createopts`` options for the new repository.
3623 3629
3624 3630 The following keys for ``createopts`` are recognized:
3625 3631
3626 3632 backend
3627 3633 The storage backend to use.
3628 3634 lfs
3629 3635 Repository will be created with ``lfs`` requirement. The lfs extension
3630 3636 will automatically be loaded when the repository is accessed.
3631 3637 narrowfiles
3632 3638 Set up repository to support narrow file storage.
3633 3639 sharedrepo
3634 3640 Repository object from which storage should be shared.
3635 3641 sharedrelative
3636 3642 Boolean indicating if the path to the shared repo should be
3637 3643 stored as relative. By default, the pointer to the "parent" repo
3638 3644 is stored as an absolute path.
3639 3645 shareditems
3640 3646 Set of items to share to the new repository (in addition to storage).
3641 3647 shallowfilestore
3642 3648 Indicates that storage for files should be shallow (not all ancestor
3643 3649 revisions are known).
3644 3650 """
3645 3651 createopts = defaultcreateopts(ui, createopts=createopts)
3646 3652
3647 3653 unknownopts = filterknowncreateopts(ui, createopts)
3648 3654
3649 3655 if not isinstance(unknownopts, dict):
3650 3656 raise error.ProgrammingError(
3651 3657 b'filterknowncreateopts() did not return a dict'
3652 3658 )
3653 3659
3654 3660 if unknownopts:
3655 3661 raise error.Abort(
3656 3662 _(
3657 3663 b'unable to create repository because of unknown '
3658 3664 b'creation option: %s'
3659 3665 )
3660 3666 % b', '.join(sorted(unknownopts)),
3661 3667 hint=_(b'is a required extension not loaded?'),
3662 3668 )
3663 3669
3664 3670 requirements = newreporequirements(ui, createopts=createopts)
3665 3671 requirements -= checkrequirementscompat(ui, requirements)
3666 3672
3667 3673 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3668 3674
3669 3675 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3670 3676 if hgvfs.exists():
3671 3677 raise error.RepoError(_(b'repository %s already exists') % path)
3672 3678
3673 3679 if b'sharedrepo' in createopts:
3674 3680 sharedpath = createopts[b'sharedrepo'].sharedpath
3675 3681
3676 3682 if createopts.get(b'sharedrelative'):
3677 3683 try:
3678 3684 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3679 3685 sharedpath = util.pconvert(sharedpath)
3680 3686 except (IOError, ValueError) as e:
3681 3687 # ValueError is raised on Windows if the drive letters differ
3682 3688 # on each path.
3683 3689 raise error.Abort(
3684 3690 _(b'cannot calculate relative path'),
3685 3691 hint=stringutil.forcebytestr(e),
3686 3692 )
3687 3693
3688 3694 if not wdirvfs.exists():
3689 3695 wdirvfs.makedirs()
3690 3696
3691 3697 hgvfs.makedir(notindexed=True)
3692 3698 if b'sharedrepo' not in createopts:
3693 3699 hgvfs.mkdir(b'cache')
3694 3700 hgvfs.mkdir(b'wcache')
3695 3701
3696 3702 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3697 3703 if has_store and b'sharedrepo' not in createopts:
3698 3704 hgvfs.mkdir(b'store')
3699 3705
3700 3706 # We create an invalid changelog outside the store so very old
3701 3707 # Mercurial versions (which didn't know about the requirements
3702 3708 # file) encounter an error on reading the changelog. This
3703 3709 # effectively locks out old clients and prevents them from
3704 3710 # mucking with a repo in an unknown format.
3705 3711 #
3706 3712 # The revlog header has version 65535, which won't be recognized by
3707 3713 # such old clients.
3708 3714 hgvfs.append(
3709 3715 b'00changelog.i',
3710 3716 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3711 3717 b'layout',
3712 3718 )
3713 3719
3714 3720 # Filter the requirements into working copy and store ones
3715 3721 wcreq, storereq = scmutil.filterrequirements(requirements)
3716 3722 # write working copy ones
3717 3723 scmutil.writerequires(hgvfs, wcreq)
3718 3724 # If there are store requirements and the current repository
3719 3725 # is not a shared one, write the store requirements.
3720 3726 # For a new shared repository, we don't need to write them, as
3721 3727 # they are already present in the shared source's store requires.
3722 3728 if storereq and b'sharedrepo' not in createopts:
3723 3729 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3724 3730 scmutil.writerequires(storevfs, storereq)
3725 3731
3726 3732 # Write out file telling readers where to find the shared store.
3727 3733 if b'sharedrepo' in createopts:
3728 3734 hgvfs.write(b'sharedpath', sharedpath)
3729 3735
3730 3736 if createopts.get(b'shareditems'):
3731 3737 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3732 3738 hgvfs.write(b'shared', shared)
3733 3739
3734 3740
3735 3741 def poisonrepository(repo):
3736 3742 """Poison a repository instance so it can no longer be used."""
3737 3743 # Perform any cleanup on the instance.
3738 3744 repo.close()
3739 3745
3740 3746 # Our strategy is to replace the type of the object with one that
3741 3747 # has all attribute lookups result in error.
3742 3748 #
3743 3749 # But we have to allow the close() method because some constructors
3744 3750 # of repos call close() on repo references.
3745 3751 class poisonedrepository(object):
3746 3752 def __getattribute__(self, item):
3747 3753 if item == 'close':
3748 3754 return object.__getattribute__(self, item)
3749 3755
3750 3756 raise error.ProgrammingError(
3751 3757 b'repo instances should not be used after unshare'
3752 3758 )
3753 3759
3754 3760 def close(self):
3755 3761 pass
3756 3762
3757 3763 # We may have a repoview, which intercepts __setattr__. So be sure
3758 3764 # we operate at the lowest level possible.
3759 3765 object.__setattr__(repo, '__class__', poisonedrepository)
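# Behaviour sketch after poisoning:
#
#     poisonrepository(repo)
#     repo.close()      # still permitted via the close() escape hatch
#     repo.changelog    # raises error.ProgrammingError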
@@ -1,671 +1,670 b''
1 1 #require serve no-reposimplestore no-chg
2 2
3 3 #testcases stream-legacy stream-bundle2
4 4
5 5 #if stream-legacy
6 6 $ cat << EOF >> $HGRCPATH
7 7 > [server]
8 8 > bundle2.stream = no
9 9 > EOF
10 10 #endif
11 11
12 12 Initialize repository
13 13 the status call is to check for issue5130
14 14
15 15 $ hg init server
16 16 $ cd server
17 17 $ touch foo
18 18 $ hg -q commit -A -m initial
19 19 >>> for i in range(1024):
20 20 ... with open(str(i), 'wb') as fh:
21 21 ... fh.write(b"%d" % i) and None
22 22 $ hg -q commit -A -m 'add a lot of files'
23 23 $ hg st
24 24 $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
25 25 $ cat hg.pid > $DAEMON_PIDS
26 26 $ cd ..
27 27
28 28 Cannot stream clone when server.uncompressed is set
29 29
30 30 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
31 31 200 Script output follows
32 32
33 33 1
34 34
35 35 #if stream-legacy
36 36 $ hg debugcapabilities http://localhost:$HGPORT
37 37 Main capabilities:
38 38 batch
39 39 branchmap
40 40 $USUAL_BUNDLE2_CAPS_SERVER$
41 41 changegroupsubset
42 42 compression=$BUNDLE2_COMPRESSIONS$
43 43 getbundle
44 44 httpheader=1024
45 45 httpmediatype=0.1rx,0.1tx,0.2tx
46 46 known
47 47 lookup
48 48 pushkey
49 49 unbundle=HG10GZ,HG10BZ,HG10UN
50 50 unbundlehash
51 51 Bundle2 capabilities:
52 52 HG20
53 53 bookmarks
54 54 changegroup
55 55 01
56 56 02
57 57 checkheads
58 58 related
59 59 digests
60 60 md5
61 61 sha1
62 62 sha512
63 63 error
64 64 abort
65 65 unsupportedcontent
66 66 pushraced
67 67 pushkey
68 68 hgtagsfnodes
69 69 listkeys
70 70 phases
71 71 heads
72 72 pushkey
73 73 remote-changegroup
74 74 http
75 75 https
76 76
77 77 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
78 78 warning: stream clone requested but server has them disabled
79 79 requesting all changes
80 80 adding changesets
81 81 adding manifests
82 82 adding file changes
83 83 added 2 changesets with 1025 changes to 1025 files
84 84 new changesets 96ee1d7354c4:c17445101a72
85 85
86 86 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
87 87 200 Script output follows
88 88 content-type: application/mercurial-0.2
89 89
90 90
91 91 $ f --size body --hexdump --bytes 100
92 92 body: size=232
93 93 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
94 94 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
95 95 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
96 96 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
97 97 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
98 98 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
99 99 0060: 69 73 20 66 |is f|
100 100
101 101 #endif
102 102 #if stream-bundle2
103 103 $ hg debugcapabilities http://localhost:$HGPORT
104 104 Main capabilities:
105 105 batch
106 106 branchmap
107 107 $USUAL_BUNDLE2_CAPS_SERVER$
108 108 changegroupsubset
109 109 compression=$BUNDLE2_COMPRESSIONS$
110 110 getbundle
111 111 httpheader=1024
112 112 httpmediatype=0.1rx,0.1tx,0.2tx
113 113 known
114 114 lookup
115 115 pushkey
116 116 unbundle=HG10GZ,HG10BZ,HG10UN
117 117 unbundlehash
118 118 Bundle2 capabilities:
119 119 HG20
120 120 bookmarks
121 121 changegroup
122 122 01
123 123 02
124 124 checkheads
125 125 related
126 126 digests
127 127 md5
128 128 sha1
129 129 sha512
130 130 error
131 131 abort
132 132 unsupportedcontent
133 133 pushraced
134 134 pushkey
135 135 hgtagsfnodes
136 136 listkeys
137 137 phases
138 138 heads
139 139 pushkey
140 140 remote-changegroup
141 141 http
142 142 https
143 143
144 144 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
145 145 warning: stream clone requested but server has them disabled
146 146 requesting all changes
147 147 adding changesets
148 148 adding manifests
149 149 adding file changes
150 150 added 2 changesets with 1025 changes to 1025 files
151 151 new changesets 96ee1d7354c4:c17445101a72
152 152
153 153 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
154 154 200 Script output follows
155 155 content-type: application/mercurial-0.2
156 156
157 157
158 158 $ f --size body --hexdump --bytes 100
159 159 body: size=232
160 160 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
161 161 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
162 162 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
163 163 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
164 164 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
165 165 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
166 166 0060: 69 73 20 66 |is f|
167 167
168 168 #endif
169 169
170 170 $ killdaemons.py
171 171 $ cd server
172 172 $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
173 173 $ cat hg.pid > $DAEMON_PIDS
174 174 $ cd ..
175 175
176 176 Basic clone
177 177
178 178 #if stream-legacy
179 179 $ hg clone --stream -U http://localhost:$HGPORT clone1
180 180 streaming all changes
181 181 1027 files to transfer, 96.3 KB of data (no-zstd !)
182 182 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
183 183 1027 files to transfer, 93.5 KB of data (zstd !)
184 184 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
185 185 searching for changes
186 186 no changes found
187 187 $ cat server/errors.txt
188 188 #endif
189 189 #if stream-bundle2
190 190 $ hg clone --stream -U http://localhost:$HGPORT clone1
191 191 streaming all changes
192 192 1030 files to transfer, 96.5 KB of data (no-zstd !)
193 193 transferred 96.5 KB in * seconds (*/sec) (glob) (no-zstd !)
194 194 1030 files to transfer, 93.6 KB of data (zstd !)
195 195 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
196 196
197 197 $ ls -1 clone1/.hg/cache
198 198 branch2-base
199 199 branch2-immutable
200 200 branch2-served
201 201 branch2-served.hidden
202 202 branch2-visible
203 203 branch2-visible-hidden
204 hgtagsfnodes1
205 204 rbc-names-v1
206 205 rbc-revs-v1
207 206 tags2
208 207 tags2-served
209 208 $ cat server/errors.txt
210 209 #endif
211 210
212 211 getbundle requests with stream=1 are uncompressed
213 212
214 213 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
215 214 200 Script output follows
216 215 content-type: application/mercurial-0.2
217 216
218 217
219 218 $ f --size --hex --bytes 256 body
220 219 body: size=112262 (no-zstd !)
221 220 body: size=109410 (zstd no-rust !)
222 221 body: size=109431 (rust !)
223 222 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
224 223 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (no-zstd !)
225 224 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98| (no-zstd !)
226 225 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030| (no-zstd !)
227 226 0010: 99 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (zstd no-rust !)
228 227 0010: ae 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (rust !)
229 228 0020: 05 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 39 35 |....^bytecount95| (zstd no-rust !)
230 229 0020: 05 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 39 35 |....sbytecount95| (rust !)
231 230 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| (zstd !)
232 231 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
233 232 0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
234 233 0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
235 234 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| (no-zstd !)
236 235 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| (no-zstd !)
237 236 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| (no-zstd !)
238 237 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| (no-zstd !)
239 238 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| (no-zstd !)
240 239 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| (no-zstd !)
241 240 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u| (no-zstd !)
242 241 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....| (no-zstd !)
243 242 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................| (no-zstd !)
244 243 0070: 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 73 |Crevlog-compress| (zstd no-rust !)
245 244 0070: 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 65 |Cpersistent-node| (rust !)
246 245 0080: 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c 6f |ion-zstd%2Crevlo| (zstd no-rust !)
247 246 0080: 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f 6d |map%2Crevlog-com| (rust !)
248 247 0090: 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 6c |gv1%2Csparserevl| (zstd no-rust !)
249 248 0090: 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 43 |pression-zstd%2C| (rust !)
250 249 00a0: 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 08 |og%2Cstore....s.| (zstd no-rust !)
251 250 00a0: 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 73 |revlogv1%2Cspars| (rust !)
252 251 00b0: 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 |Bdata/0.i.......| (zstd no-rust !)
253 252 00b0: 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 00 |erevlog%2Cstore.| (rust !)
254 253 00c0: 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 |................| (zstd no-rust !)
255 254 00c0: 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 |...s.Bdata/0.i..| (rust !)
256 255 00d0: 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 |..........)c.I.#| (zstd no-rust !)
257 256 00d0: 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 |................| (rust !)
258 257 00e0: 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 |....Vg.g,i..9...| (zstd no-rust !)
259 258 00e0: 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 |...............)| (rust !)
260 259 00f0: 00 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 |.........u0s.Bda| (zstd no-rust !)
261 260 00f0: 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 |c.I.#....Vg.g,i.| (rust !)
262 261
263 262 --uncompressed is an alias for --stream
264 263
265 264 #if stream-legacy
266 265 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
267 266 streaming all changes
268 267 1027 files to transfer, 96.3 KB of data (no-zstd !)
269 268 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
270 269 1027 files to transfer, 93.5 KB of data (zstd !)
271 270 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
272 271 searching for changes
273 272 no changes found
274 273 #endif
275 274 #if stream-bundle2
276 275 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
277 276 streaming all changes
278 277 1030 files to transfer, 96.5 KB of data (no-zstd !)
279 278 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
280 279 1030 files to transfer, 93.6 KB of data (zstd !)
281 280 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
282 281 #endif
283 282
284 283 Clone with background file closing enabled
285 284
286 285 #if stream-legacy
287 286 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
288 287 using http://localhost:$HGPORT/
289 288 sending capabilities command
290 289 sending branchmap command
291 290 streaming all changes
292 291 sending stream_out command
293 292 1027 files to transfer, 96.3 KB of data (no-zstd !)
294 293 1027 files to transfer, 93.5 KB of data (zstd !)
295 294 starting 4 threads for background file closing
296 295 updating the branch cache
297 296 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
298 297 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
299 298 query 1; heads
300 299 sending batch command
301 300 searching for changes
302 301 all remote heads known locally
303 302 no changes found
304 303 sending getbundle command
305 304 bundle2-input-bundle: with-transaction
306 305 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
307 306 bundle2-input-part: "phase-heads" supported
308 307 bundle2-input-part: total payload size 24
309 308 bundle2-input-bundle: 2 parts total
310 309 checking for updated bookmarks
311 310 updating the branch cache
312 311 (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
313 312 #endif
314 313 #if stream-bundle2
315 314 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
316 315 using http://localhost:$HGPORT/
317 316 sending capabilities command
318 317 query 1; heads
319 318 sending batch command
320 319 streaming all changes
321 320 sending getbundle command
322 321 bundle2-input-bundle: with-transaction
323 322 bundle2-input-part: "stream2" (params: 3 mandatory) supported
324 323 applying stream bundle
325 324 1030 files to transfer, 96.5 KB of data (no-zstd !)
326 325 1030 files to transfer, 93.6 KB of data (zstd !)
327 326 starting 4 threads for background file closing
328 327 starting 4 threads for background file closing
329 328 updating the branch cache
330 329 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
331 330 bundle2-input-part: total payload size 112094 (no-zstd !)
332 331 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
333 332 bundle2-input-part: total payload size 109216 (zstd !)
334 333 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
335 334 bundle2-input-bundle: 2 parts total
336 335 checking for updated bookmarks
337 336 updating the branch cache
338 337 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
339 338 #endif
340 339
341 340 Cannot stream clone when there are secret changesets
342 341
343 342 $ hg -R server phase --force --secret -r tip
344 343 $ hg clone --stream -U http://localhost:$HGPORT secret-denied
345 344 warning: stream clone requested but server has them disabled
346 345 requesting all changes
347 346 adding changesets
348 347 adding manifests
349 348 adding file changes
350 349 added 1 changesets with 1 changes to 1 files
351 350 new changesets 96ee1d7354c4
352 351
353 352 $ killdaemons.py
354 353
355 354 Streaming of secrets can be overridden by server config
356 355
357 356 $ cd server
358 357 $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
359 358 $ cat hg.pid > $DAEMON_PIDS
360 359 $ cd ..
361 360
362 361 #if stream-legacy
363 362 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
364 363 streaming all changes
365 364 1027 files to transfer, 96.3 KB of data (no-zstd !)
366 365 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
367 366 1027 files to transfer, 93.5 KB of data (zstd !)
368 367 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
369 368 searching for changes
370 369 no changes found
371 370 #endif
372 371 #if stream-bundle2
373 372 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
374 373 streaming all changes
375 374 1030 files to transfer, 96.5 KB of data (no-zstd !)
376 375 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
377 376 1030 files to transfer, 93.6 KB of data (zstd !)
378 377 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
379 378 #endif
380 379
381 380 $ killdaemons.py
382 381
383 382 Verify interaction between preferuncompressed and secret presence
384 383
385 384 $ cd server
386 385 $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
387 386 $ cat hg.pid > $DAEMON_PIDS
388 387 $ cd ..
389 388
390 389 $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
391 390 requesting all changes
392 391 adding changesets
393 392 adding manifests
394 393 adding file changes
395 394 added 1 changesets with 1 changes to 1 files
396 395 new changesets 96ee1d7354c4
397 396
398 397 $ killdaemons.py
399 398
400 399 Clone not allowed when full bundles disabled and can't serve secrets
401 400
402 401 $ cd server
403 402 $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
404 403 $ cat hg.pid > $DAEMON_PIDS
405 404 $ cd ..
406 405
407 406 $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
408 407 warning: stream clone requested but server has them disabled
409 408 requesting all changes
410 409 remote: abort: server has pull-based clones disabled
411 410 abort: pull failed on remote
412 411 (remove --pull if specified or upgrade Mercurial)
413 412 [100]
414 413
415 414 Local stream clone with secrets involved
416 415 (This is just a test of behavior: if you have access to the repo's files,
417 416 there is no security so it isn't important to prevent a clone here.)
418 417
419 418 $ hg clone -U --stream server local-secret
420 419 warning: stream clone requested but server has them disabled
421 420 requesting all changes
422 421 adding changesets
423 422 adding manifests
424 423 adding file changes
425 424 added 1 changesets with 1 changes to 1 files
426 425 new changesets 96ee1d7354c4
427 426
428 427 Stream clone while repo is changing:
429 428
430 429 $ mkdir changing
431 430 $ cd changing
432 431
433 432 extension for delaying the server process so we can reliably modify the repo
434 433 while cloning
435 434
436 435 $ cat > stream_steps.py <<EOF
437 436 > import os
438 437 > import sys
439 438 > from mercurial import (
440 439 > encoding,
441 440 > extensions,
442 441 > streamclone,
443 442 > testing,
444 443 > )
445 444 > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
446 445 > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
447 446 >
448 447 > def _test_sync_point_walk_1(orig, repo):
449 448 > testing.write_file(WALKED_FILE_1)
450 449 >
451 450 > def _test_sync_point_walk_2(orig, repo):
452 451 > assert repo._currentlock(repo._lockref) is None
453 452 > testing.wait_file(WALKED_FILE_2)
454 453 >
455 454 > extensions.wrapfunction(
456 455 > streamclone,
457 456 > '_test_sync_point_walk_1',
458 457 > _test_sync_point_walk_1
459 458 > )
460 459 > extensions.wrapfunction(
461 460 > streamclone,
462 461 > '_test_sync_point_walk_2',
463 462 > _test_sync_point_walk_2
464 463 > )
465 464 > EOF
466 465
467 466 prepare a repo with a small and a big file to cover both code paths in emitrevlogdata
468 467
469 468 $ hg init repo
470 469 $ touch repo/f1
471 470 $ $TESTDIR/seq.py 50000 > repo/f2
472 471 $ hg -R repo ci -Aqm "0"
473 472 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
474 473 $ export HG_TEST_STREAM_WALKED_FILE_1
475 474 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
476 475 $ export HG_TEST_STREAM_WALKED_FILE_2
477 476 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
478 477 $ export HG_TEST_STREAM_WALKED_FILE_3
479 478 # $ cat << EOF >> $HGRCPATH
480 479 # > [hooks]
481 480 # > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
482 481 # > EOF
483 482 $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
484 483 $ cat hg.pid >> $DAEMON_PIDS
485 484
486 485 clone while modifying the repo between stat'ing files under the write lock and
487 486 actually serving their content
488 487
489 488 $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
490 489 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
491 490 $ echo >> repo/f1
492 491 $ echo >> repo/f2
493 492 $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
494 493 $ touch $HG_TEST_STREAM_WALKED_FILE_2
495 494 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
496 495 $ hg -R clone id
497 496 000000000000
498 497 $ cat errors.log
499 498 $ cd ..
500 499
501 500 Stream repository with bookmarks
502 501 --------------------------------
503 502
504 503 (revert introduction of secret changeset)
505 504
506 505 $ hg -R server phase --draft 'secret()'
507 506
508 507 add a bookmark
509 508
510 509 $ hg -R server bookmark -r tip some-bookmark
511 510
512 511 clone it
513 512
514 513 #if stream-legacy
515 514 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
516 515 streaming all changes
517 516 1027 files to transfer, 96.3 KB of data (no-zstd !)
518 517 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
519 518 1027 files to transfer, 93.5 KB of data (zstd !)
520 519 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
521 520 searching for changes
522 521 no changes found
523 522 updating to branch default
524 523 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
525 524 #endif
526 525 #if stream-bundle2
527 526 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
528 527 streaming all changes
529 528 1033 files to transfer, 96.6 KB of data (no-zstd !)
530 529 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
531 530 1033 files to transfer, 93.8 KB of data (zstd !)
532 531 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
533 532 updating to branch default
534 533 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
535 534 #endif
536 535 $ hg -R with-bookmarks bookmarks
537 536 some-bookmark 1:c17445101a72
538 537
539 538 Stream repository with phases
540 539 -----------------------------
541 540
542 541 Clone as publishing
543 542
544 543 $ hg -R server phase -r 'all()'
545 544 0: draft
546 545 1: draft
547 546
548 547 #if stream-legacy
549 548 $ hg clone --stream http://localhost:$HGPORT phase-publish
550 549 streaming all changes
551 550 1027 files to transfer, 96.3 KB of data (no-zstd !)
552 551 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
553 552 1027 files to transfer, 93.5 KB of data (zstd !)
554 553 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
555 554 searching for changes
556 555 no changes found
557 556 updating to branch default
558 557 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
559 558 #endif
560 559 #if stream-bundle2
561 560 $ hg clone --stream http://localhost:$HGPORT phase-publish
562 561 streaming all changes
563 562 1033 files to transfer, 96.6 KB of data (no-zstd !)
564 563 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
565 564 1033 files to transfer, 93.8 KB of data (zstd !)
566 565 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
567 566 updating to branch default
568 567 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
569 568 #endif
570 569 $ hg -R phase-publish phase -r 'all()'
571 570 0: public
572 571 1: public
573 572
574 573 Clone as non publishing
575 574
576 575 $ cat << EOF >> server/.hg/hgrc
577 576 > [phases]
578 577 > publish = False
579 578 > EOF
580 579 $ killdaemons.py
581 580 $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
582 581 $ cat hg.pid > $DAEMON_PIDS
583 582
584 583 #if stream-legacy
585 584
586 585 With v1 of the stream protocol, changesets are always cloned as public. This
587 586 makes stream v1 unsuitable for non-publishing repositories.
588 587
589 588 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
590 589 streaming all changes
591 590 1027 files to transfer, 96.3 KB of data (no-zstd !)
592 591 transferred 96.3 KB in * seconds (* */sec) (glob) (no-zstd !)
593 592 1027 files to transfer, 93.5 KB of data (zstd !)
594 593 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
595 594 searching for changes
596 595 no changes found
597 596 updating to branch default
598 597 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
599 598 $ hg -R phase-no-publish phase -r 'all()'
600 599 0: public
601 600 1: public
602 601 #endif
603 602 #if stream-bundle2
604 603 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
605 604 streaming all changes
606 605 1034 files to transfer, 96.7 KB of data (no-zstd !)
607 606 transferred 96.7 KB in * seconds (* */sec) (glob) (no-zstd !)
608 607 1034 files to transfer, 93.9 KB of data (zstd !)
609 608 transferred 93.9 KB in * seconds (* */sec) (glob) (zstd !)
610 609 updating to branch default
611 610 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
612 611 $ hg -R phase-no-publish phase -r 'all()'
613 612 0: draft
614 613 1: draft
615 614 #endif
616 615
617 616 $ killdaemons.py
618 617
619 618 #if stream-legacy
620 619
621 620 With v1 of the stream protocol, changesets are always cloned as public. There
622 621 is no obsolescence marker exchange in stream v1.
623 622
624 623 #endif
625 624 #if stream-bundle2
626 625
627 626 Stream repository with obsolescence
628 627 -----------------------------------
629 628
630 629 Clone non-publishing with obsolescence
631 630
632 631 $ cat >> $HGRCPATH << EOF
633 632 > [experimental]
634 633 > evolution=all
635 634 > EOF
636 635
637 636 $ cd server
638 637 $ echo foo > foo
639 638 $ hg -q commit -m 'about to be pruned'
640 639 $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
641 640 1 new obsolescence markers
642 641 obsoleted 1 changesets
643 642 $ hg up null -q
644 643 $ hg log -T '{rev}: {phase}\n'
645 644 1: draft
646 645 0: draft
647 646 $ hg serve -p $HGPORT -d --pid-file=hg.pid
648 647 $ cat hg.pid > $DAEMON_PIDS
649 648 $ cd ..
650 649
651 650 $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
652 651 streaming all changes
653 652 1035 files to transfer, 97.1 KB of data (no-zstd !)
654 653 transferred 97.1 KB in * seconds (* */sec) (glob) (no-zstd !)
655 654 1035 files to transfer, 94.3 KB of data (zstd !)
656 655 transferred 94.3 KB in * seconds (* */sec) (glob) (zstd !)
657 656 $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
658 657 1: draft
659 658 0: draft
660 659 $ hg debugobsolete -R with-obsolescence
661 660 50382b884f66690b7045cac93a540cba4d4c906f 0 {c17445101a72edac06facd130d14808dfbd5c7c2} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
662 661
663 662 $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
664 663 streaming all changes
665 664 remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
666 665 abort: pull failed on remote
667 666 [100]
668 667
669 668 $ killdaemons.py
670 669
671 670 #endif
@@ -1,1326 +1,1324 b''
1 1 #testcases sshv1 sshv2
2 2
3 3 #if sshv2
4 4 $ cat >> $HGRCPATH << EOF
5 5 > [experimental]
6 6 > sshpeer.advertise-v2 = true
7 7 > sshserver.support-v2 = true
8 8 > EOF
9 9 #endif
10 10
11 11 Prepare repo a:
12 12
13 13 $ hg init a
14 14 $ cd a
15 15 $ echo a > a
16 16 $ hg add a
17 17 $ hg commit -m test
18 18 $ echo first line > b
19 19 $ hg add b
20 20
21 21 Create a non-inlined filelog:
22 22
23 23 $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
24 24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
25 25 > cat data1 >> b
26 26 > hg commit -m test
27 27 > done
28 28
29 29 List files in store/data (should show a 'b.d'):
30 30
31 31 #if reporevlogstore
32 32 $ for i in .hg/store/data/*; do
33 33 > echo $i
34 34 > done
35 35 .hg/store/data/a.i
36 36 .hg/store/data/b.d
37 37 .hg/store/data/b.i
38 38 #endif
39 39
40 40 Trigger branchcache creation:
41 41
42 42 $ hg branches
43 43 default 10:a7949464abda
44 44 $ ls .hg/cache
45 45 branch2-served
46 46 rbc-names-v1
47 47 rbc-revs-v1
48 48
49 49 Default operation:
50 50
51 51 $ hg clone . ../b
52 52 updating to branch default
53 53 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
54 54 $ cd ../b
55 55
56 56 Ensure branchcache got copied over:
57 57
58 58 $ ls .hg/cache
59 59 branch2-base
60 60 branch2-immutable
61 61 branch2-served
62 62 branch2-served.hidden
63 63 branch2-visible
64 64 branch2-visible-hidden
65 hgtagsfnodes1
66 65 rbc-names-v1
67 66 rbc-revs-v1
68 67 tags2
69 68 tags2-served
70 69
71 70 $ cat a
72 71 a
73 72 $ hg verify
74 73 checking changesets
75 74 checking manifests
76 75 crosschecking files in changesets and manifests
77 76 checking files
78 77 checked 11 changesets with 11 changes to 2 files
79 78
80 79 Invalid dest '' must abort:
81 80
82 81 $ hg clone . ''
83 82 abort: empty destination path is not valid
84 83 [10]
85 84
86 85 No update, with debug option:
87 86
88 87 #if hardlink
89 88 $ hg --debug clone -U . ../c --config progress.debug=true
90 89 linking: 1 files
91 90 linking: 2 files
92 91 linking: 3 files
93 92 linking: 4 files
94 93 linking: 5 files
95 94 linking: 6 files
96 95 linking: 7 files
97 96 linking: 8 files
98 97 linked 8 files (reporevlogstore !)
99 98 linking: 9 files (reposimplestore !)
100 99 linking: 10 files (reposimplestore !)
101 100 linking: 11 files (reposimplestore !)
102 101 linking: 12 files (reposimplestore !)
103 102 linking: 13 files (reposimplestore !)
104 103 linking: 14 files (reposimplestore !)
105 104 linking: 15 files (reposimplestore !)
106 105 linking: 16 files (reposimplestore !)
107 106 linking: 17 files (reposimplestore !)
108 107 linking: 18 files (reposimplestore !)
109 108 linked 18 files (reposimplestore !)
110 109 updating the branch cache
111 110 #else
112 111 $ hg --debug clone -U . ../c --config progress.debug=true
113 112 linking: 1 files
114 113 copying: 2 files
115 114 copying: 3 files
116 115 copying: 4 files
117 116 copying: 5 files
118 117 copying: 6 files
119 118 copying: 7 files
120 119 copying: 8 files
121 120 copied 8 files (reporevlogstore !)
122 121 copying: 9 files (reposimplestore !)
123 122 copying: 10 files (reposimplestore !)
124 123 copying: 11 files (reposimplestore !)
125 124 copying: 12 files (reposimplestore !)
126 125 copying: 13 files (reposimplestore !)
127 126 copying: 14 files (reposimplestore !)
128 127 copying: 15 files (reposimplestore !)
129 128 copying: 16 files (reposimplestore !)
130 129 copying: 17 files (reposimplestore !)
131 130 copying: 18 files (reposimplestore !)
132 131 copied 18 files (reposimplestore !)
133 132 #endif
134 133 $ cd ../c
135 134
136 135 Ensure branchcache got copied over:
137 136
138 137 $ ls .hg/cache
139 138 branch2-base
140 139 branch2-immutable
141 140 branch2-served
142 141 branch2-served.hidden
143 142 branch2-visible
144 143 branch2-visible-hidden
145 hgtagsfnodes1
146 144 rbc-names-v1
147 145 rbc-revs-v1
148 146 tags2
149 147 tags2-served
150 148
151 149 $ cat a 2>/dev/null || echo "a not present"
152 150 a not present
153 151 $ hg verify
154 152 checking changesets
155 153 checking manifests
156 154 crosschecking files in changesets and manifests
157 155 checking files
158 156 checked 11 changesets with 11 changes to 2 files
159 157
160 158 Default destination:
161 159
162 160 $ mkdir ../d
163 161 $ cd ../d
164 162 $ hg clone ../a
165 163 destination directory: a
166 164 updating to branch default
167 165 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
168 166 $ cd a
169 167 $ hg cat a
170 168 a
171 169 $ cd ../..
172 170
173 171 Check that we drop the 'file:' from the path before writing the .hgrc:
174 172
175 173 $ hg clone file:a e
176 174 updating to branch default
177 175 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
178 176 $ grep 'file:' e/.hg/hgrc
179 177 [1]
180 178
181 179 Check that path aliases are expanded:
182 180
183 181 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
184 182 $ hg -R f showconfig paths.default
185 183 $TESTTMP/a#0
186 184
187 185 Use --pull:
188 186
189 187 $ hg clone --pull a g
190 188 requesting all changes
191 189 adding changesets
192 190 adding manifests
193 191 adding file changes
194 192 added 11 changesets with 11 changes to 2 files
195 193 new changesets acb14030fe0a:a7949464abda
196 194 updating to branch default
197 195 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
198 196 $ hg -R g verify
199 197 checking changesets
200 198 checking manifests
201 199 crosschecking files in changesets and manifests
202 200 checking files
203 201 checked 11 changesets with 11 changes to 2 files
204 202
205 203 Invalid dest '' with --pull must abort (issue2528):
206 204
207 205 $ hg clone --pull a ''
208 206 abort: empty destination path is not valid
209 207 [10]
210 208
211 209 Clone to '.':
212 210
213 211 $ mkdir h
214 212 $ cd h
215 213 $ hg clone ../a .
216 214 updating to branch default
217 215 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
218 216 $ cd ..
219 217
220 218
221 219 *** Tests for option -u ***
222 220
223 221 Adding some more history to repo a:
224 222
225 223 $ cd a
226 224 $ hg tag ref1
227 225 $ echo the quick brown fox >a
228 226 $ hg ci -m "hacked default"
229 227 $ hg up ref1
230 228 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
231 229 $ hg branch stable
232 230 marked working directory as branch stable
233 231 (branches are permanent and global, did you want a bookmark?)
234 232 $ echo some text >a
235 233 $ hg ci -m "starting branch stable"
236 234 $ hg tag ref2
237 235 $ echo some more text >a
238 236 $ hg ci -m "another change for branch stable"
239 237 $ hg up ref2
240 238 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
241 239 $ hg parents
242 240 changeset: 13:e8ece76546a6
243 241 branch: stable
244 242 tag: ref2
245 243 parent: 10:a7949464abda
246 244 user: test
247 245 date: Thu Jan 01 00:00:00 1970 +0000
248 246 summary: starting branch stable
249 247
250 248
251 249 Repo a has two heads:
252 250
253 251 $ hg heads
254 252 changeset: 15:0aae7cf88f0d
255 253 branch: stable
256 254 tag: tip
257 255 user: test
258 256 date: Thu Jan 01 00:00:00 1970 +0000
259 257 summary: another change for branch stable
260 258
261 259 changeset: 12:f21241060d6a
262 260 user: test
263 261 date: Thu Jan 01 00:00:00 1970 +0000
264 262 summary: hacked default
265 263
266 264
267 265 $ cd ..
268 266
269 267
270 268 Testing --noupdate with --updaterev (must abort):
271 269
272 270 $ hg clone --noupdate --updaterev 1 a ua
273 271 abort: cannot specify both --noupdate and --updaterev
274 272 [10]
275 273
276 274
277 275 Testing clone -u:
278 276
279 277 $ hg clone -u . a ua
280 278 updating to branch stable
281 279 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
282 280
283 281 Repo ua has both heads:
284 282
285 283 $ hg -R ua heads
286 284 changeset: 15:0aae7cf88f0d
287 285 branch: stable
288 286 tag: tip
289 287 user: test
290 288 date: Thu Jan 01 00:00:00 1970 +0000
291 289 summary: another change for branch stable
292 290
293 291 changeset: 12:f21241060d6a
294 292 user: test
295 293 date: Thu Jan 01 00:00:00 1970 +0000
296 294 summary: hacked default
297 295
298 296
299 297 Same revision checked out in repo a and ua:
300 298
301 299 $ hg -R a parents --template "{node|short}\n"
302 300 e8ece76546a6
303 301 $ hg -R ua parents --template "{node|short}\n"
304 302 e8ece76546a6
305 303
306 304 $ rm -r ua
307 305
308 306
309 307 Testing clone --pull -u:
310 308
311 309 $ hg clone --pull -u . a ua
312 310 requesting all changes
313 311 adding changesets
314 312 adding manifests
315 313 adding file changes
316 314 added 16 changesets with 16 changes to 3 files (+1 heads)
317 315 new changesets acb14030fe0a:0aae7cf88f0d
318 316 updating to branch stable
319 317 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
320 318
321 319 Repo ua has both heads:
322 320
323 321 $ hg -R ua heads
324 322 changeset: 15:0aae7cf88f0d
325 323 branch: stable
326 324 tag: tip
327 325 user: test
328 326 date: Thu Jan 01 00:00:00 1970 +0000
329 327 summary: another change for branch stable
330 328
331 329 changeset: 12:f21241060d6a
332 330 user: test
333 331 date: Thu Jan 01 00:00:00 1970 +0000
334 332 summary: hacked default
335 333
336 334
337 335 Same revision checked out in repo a and ua:
338 336
339 337 $ hg -R a parents --template "{node|short}\n"
340 338 e8ece76546a6
341 339 $ hg -R ua parents --template "{node|short}\n"
342 340 e8ece76546a6
343 341
344 342 $ rm -r ua
345 343
346 344
347 345 Testing clone -u <branch>:
348 346
349 347 $ hg clone -u stable a ua
350 348 updating to branch stable
351 349 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
352 350
353 351 Repo ua has both heads:
354 352
355 353 $ hg -R ua heads
356 354 changeset: 15:0aae7cf88f0d
357 355 branch: stable
358 356 tag: tip
359 357 user: test
360 358 date: Thu Jan 01 00:00:00 1970 +0000
361 359 summary: another change for branch stable
362 360
363 361 changeset: 12:f21241060d6a
364 362 user: test
365 363 date: Thu Jan 01 00:00:00 1970 +0000
366 364 summary: hacked default
367 365
368 366
369 367 Branch 'stable' is checked out:
370 368
371 369 $ hg -R ua parents
372 370 changeset: 15:0aae7cf88f0d
373 371 branch: stable
374 372 tag: tip
375 373 user: test
376 374 date: Thu Jan 01 00:00:00 1970 +0000
377 375 summary: another change for branch stable
378 376
379 377
380 378 $ rm -r ua
381 379
382 380
383 381 Testing default checkout:
384 382
385 383 $ hg clone a ua
386 384 updating to branch default
387 385 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
388 386
389 387 Repo ua has both heads:
390 388
391 389 $ hg -R ua heads
392 390 changeset: 15:0aae7cf88f0d
393 391 branch: stable
394 392 tag: tip
395 393 user: test
396 394 date: Thu Jan 01 00:00:00 1970 +0000
397 395 summary: another change for branch stable
398 396
399 397 changeset: 12:f21241060d6a
400 398 user: test
401 399 date: Thu Jan 01 00:00:00 1970 +0000
402 400 summary: hacked default
403 401
404 402
405 403 Branch 'default' is checked out:
406 404
407 405 $ hg -R ua parents
408 406 changeset: 12:f21241060d6a
409 407 user: test
410 408 date: Thu Jan 01 00:00:00 1970 +0000
411 409 summary: hacked default
412 410
413 411 Test clone with a branch named "@" (issue3677)
414 412
415 413 $ hg -R ua branch @
416 414 marked working directory as branch @
417 415 $ hg -R ua commit -m 'created branch @'
418 416 $ hg clone ua atbranch
419 417 updating to branch default
420 418 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
421 419 $ hg -R atbranch heads
422 420 changeset: 16:798b6d97153e
423 421 branch: @
424 422 tag: tip
425 423 parent: 12:f21241060d6a
426 424 user: test
427 425 date: Thu Jan 01 00:00:00 1970 +0000
428 426 summary: created branch @
429 427
430 428 changeset: 15:0aae7cf88f0d
431 429 branch: stable
432 430 user: test
433 431 date: Thu Jan 01 00:00:00 1970 +0000
434 432 summary: another change for branch stable
435 433
436 434 changeset: 12:f21241060d6a
437 435 user: test
438 436 date: Thu Jan 01 00:00:00 1970 +0000
439 437 summary: hacked default
440 438
441 439 $ hg -R atbranch parents
442 440 changeset: 12:f21241060d6a
443 441 user: test
444 442 date: Thu Jan 01 00:00:00 1970 +0000
445 443 summary: hacked default
446 444
447 445
448 446 $ rm -r ua atbranch
449 447
450 448
451 449 Testing #<branch>:
452 450
453 451 $ hg clone -u . a#stable ua
454 452 adding changesets
455 453 adding manifests
456 454 adding file changes
457 455 added 14 changesets with 14 changes to 3 files
458 456 new changesets acb14030fe0a:0aae7cf88f0d
459 457 updating to branch stable
460 458 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
461 459
462 460 Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):
463 461
464 462 $ hg -R ua heads
465 463 changeset: 13:0aae7cf88f0d
466 464 branch: stable
467 465 tag: tip
468 466 user: test
469 467 date: Thu Jan 01 00:00:00 1970 +0000
470 468 summary: another change for branch stable
471 469
472 470 changeset: 10:a7949464abda
473 471 user: test
474 472 date: Thu Jan 01 00:00:00 1970 +0000
475 473 summary: test
476 474
477 475
478 476 Same revision checked out in repo a and ua:
479 477
480 478 $ hg -R a parents --template "{node|short}\n"
481 479 e8ece76546a6
482 480 $ hg -R ua parents --template "{node|short}\n"
483 481 e8ece76546a6
484 482
485 483 $ rm -r ua
486 484
487 485
488 486 Testing -u -r <branch>:
489 487
490 488 $ hg clone -u . -r stable a ua
491 489 adding changesets
492 490 adding manifests
493 491 adding file changes
494 492 added 14 changesets with 14 changes to 3 files
495 493 new changesets acb14030fe0a:0aae7cf88f0d
496 494 updating to branch stable
497 495 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
498 496
499 497 Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):
500 498
501 499 $ hg -R ua heads
502 500 changeset: 13:0aae7cf88f0d
503 501 branch: stable
504 502 tag: tip
505 503 user: test
506 504 date: Thu Jan 01 00:00:00 1970 +0000
507 505 summary: another change for branch stable
508 506
509 507 changeset: 10:a7949464abda
510 508 user: test
511 509 date: Thu Jan 01 00:00:00 1970 +0000
512 510 summary: test
513 511
514 512
515 513 Same revision checked out in repo a and ua:
516 514
517 515 $ hg -R a parents --template "{node|short}\n"
518 516 e8ece76546a6
519 517 $ hg -R ua parents --template "{node|short}\n"
520 518 e8ece76546a6
521 519
522 520 $ rm -r ua
523 521
524 522
525 523 Testing -r <branch>:
526 524
527 525 $ hg clone -r stable a ua
528 526 adding changesets
529 527 adding manifests
530 528 adding file changes
531 529 added 14 changesets with 14 changes to 3 files
532 530 new changesets acb14030fe0a:0aae7cf88f0d
533 531 updating to branch stable
534 532 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
535 533
536 534 Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):
537 535
538 536 $ hg -R ua heads
539 537 changeset: 13:0aae7cf88f0d
540 538 branch: stable
541 539 tag: tip
542 540 user: test
543 541 date: Thu Jan 01 00:00:00 1970 +0000
544 542 summary: another change for branch stable
545 543
546 544 changeset: 10:a7949464abda
547 545 user: test
548 546 date: Thu Jan 01 00:00:00 1970 +0000
549 547 summary: test
550 548
551 549
552 550 Branch 'stable' is checked out:
553 551
554 552 $ hg -R ua parents
555 553 changeset: 13:0aae7cf88f0d
556 554 branch: stable
557 555 tag: tip
558 556 user: test
559 557 date: Thu Jan 01 00:00:00 1970 +0000
560 558 summary: another change for branch stable
561 559
562 560
563 561 $ rm -r ua
564 562
565 563
566 564 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
567 565 iterable in addbranchrevs()
568 566
569 567 $ cat <<EOF > simpleclone.py
570 568 > from mercurial import hg, ui as uimod
571 569 > myui = uimod.ui.load()
572 570 > repo = hg.repository(myui, b'a')
573 571 > hg.clone(myui, {}, repo, dest=b"ua")
574 572 > EOF
575 573
576 574 $ "$PYTHON" simpleclone.py
577 575 updating to branch default
578 576 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
579 577
580 578 $ rm -r ua
581 579
582 580 $ cat <<EOF > branchclone.py
583 581 > from mercurial import extensions, hg, ui as uimod
584 582 > myui = uimod.ui.load()
585 583 > extensions.loadall(myui)
586 584 > extensions.populateui(myui)
587 585 > repo = hg.repository(myui, b'a')
588 586 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable"])
589 587 > EOF
590 588
591 589 $ "$PYTHON" branchclone.py
592 590 adding changesets
593 591 adding manifests
594 592 adding file changes
595 593 added 14 changesets with 14 changes to 3 files
596 594 new changesets acb14030fe0a:0aae7cf88f0d
597 595 updating to branch stable
598 596 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
599 597 $ rm -r ua
600 598
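As an aside, the hg.clone API driven by branchclone.py above can pin
revisions instead of branches; a hedged sketch, not run by this test (the
revs keyword is assumed to mirror the command line's -r):

    # hypothetical variant of the branchclone.py call above
    hg.clone(myui, {}, repo, dest=b"ua-rev", revs=[b"stable"])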
601 599
602 600 Test clone with special '@' bookmark:
603 601 $ cd a
604 602 $ hg bookmark -r a7949464abda @ # branch point of stable from default
605 603 $ hg clone . ../i
606 604 updating to bookmark @
607 605 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
608 606 $ hg id -i ../i
609 607 a7949464abda
610 608 $ rm -r ../i
611 609
612 610 $ hg bookmark -f -r stable @
613 611 $ hg bookmarks
614 612 @ 15:0aae7cf88f0d
615 613 $ hg clone . ../i
616 614 updating to bookmark @ on branch stable
617 615 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
618 616 $ hg id -i ../i
619 617 0aae7cf88f0d
620 618 $ cd "$TESTTMP"
621 619
622 620
623 621 Testing failures:
624 622
625 623 $ mkdir fail
626 624 $ cd fail
627 625
628 626 No local source
629 627
630 628 $ hg clone a b
631 629 abort: repository a not found
632 630 [255]
633 631
634 632 Invalid URL
635 633
636 634 $ hg clone http://invalid:url/a b
637 635 abort: error: nonnumeric port: 'url'
638 636 [100]
639 637
640 638 No remote source
641 639
642 640 #if windows
643 641 $ hg clone http://$LOCALIP:3121/a b
644 642 abort: error: * (glob)
645 643 [100]
646 644 #else
647 645 $ hg clone http://$LOCALIP:3121/a b
648 646 abort: error: *refused* (glob)
649 647 [100]
650 648 #endif
651 649 $ rm -rf b # work around bug with http clone
652 650
653 651
654 652 #if unix-permissions no-root
655 653
656 654 Inaccessible source
657 655
658 656 $ mkdir a
659 657 $ chmod 000 a
660 658 $ hg clone a b
661 659 abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
662 660 [255]
663 661
664 662 Inaccessible destination
665 663
666 664 $ hg init b
667 665 $ cd b
668 666 $ hg clone . ../a
669 667 abort: Permission denied: *../a* (glob)
670 668 [255]
671 669 $ cd ..
672 670 $ chmod 700 a
673 671 $ rm -r a b
674 672
675 673 #endif
676 674
677 675
678 676 #if fifo
679 677
680 678 Source of wrong type
681 679
682 680 $ mkfifo a
683 681 $ hg clone a b
684 682 abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
685 683 [255]
686 684 $ rm a
687 685
688 686 #endif
689 687
690 688 Default destination, same directory
691 689
692 690 $ hg init q
693 691 $ hg clone q
694 692 destination directory: q
695 693 abort: destination 'q' is not empty
696 694 [10]
697 695
698 696 destination directory not empty
699 697
700 698 $ mkdir a
701 699 $ echo stuff > a/a
702 700 $ hg clone q a
703 701 abort: destination 'a' is not empty
704 702 [10]
705 703
706 704
707 705 #if unix-permissions no-root
708 706
709 707 leave existing directory in place after clone failure
710 708
711 709 $ hg init c
712 710 $ cd c
713 711 $ echo c > c
714 712 $ hg commit -A -m test
715 713 adding c
716 714 $ chmod -rx .hg/store/data
717 715 $ cd ..
718 716 $ mkdir d
719 717 $ hg clone c d 2> err
720 718 [255]
721 719 $ test -d d
722 720 $ test -d d/.hg
723 721 [1]
724 722
725 723 re-enable perm to allow deletion
726 724
727 725 $ chmod +rx c/.hg/store/data
728 726
729 727 #endif
730 728
731 729 $ cd ..
732 730
733 731 Test clone from the repository in (emulated) revlog format 0 (issue4203):
734 732
735 733 $ mkdir issue4203
736 734 $ mkdir -p src/.hg
737 735 $ echo foo > src/foo
738 736 $ hg -R src add src/foo
739 737 $ hg -R src commit -m '#0'
740 738 $ hg -R src log -q
741 739 0:e1bab28bca43
742 740 $ hg -R src debugrevlog -c | egrep 'format|flags'
743 741 format : 0
744 742 flags : (none)
745 743 $ hg root -R src -T json | sed 's|\\\\|\\|g'
746 744 [
747 745 {
748 746 "hgpath": "$TESTTMP/src/.hg",
749 747 "reporoot": "$TESTTMP/src",
750 748 "storepath": "$TESTTMP/src/.hg"
751 749 }
752 750 ]
753 751 $ hg clone -U -q src dst
754 752 $ hg -R dst log -q
755 753 0:e1bab28bca43
756 754
757 755 Create repositories to test auto sharing functionality
758 756
759 757 $ cat >> $HGRCPATH << EOF
760 758 > [extensions]
761 759 > share=
762 760 > EOF
763 761
764 762 $ hg init empty
765 763 $ hg init source1a
766 764 $ cd source1a
767 765 $ echo initial1 > foo
768 766 $ hg -q commit -A -m initial
769 767 $ echo second > foo
770 768 $ hg commit -m second
771 769 $ cd ..
772 770
773 771 $ hg init filteredrev0
774 772 $ cd filteredrev0
775 773 $ cat >> .hg/hgrc << EOF
776 774 > [experimental]
777 775 > evolution.createmarkers=True
778 776 > EOF
779 777 $ echo initial1 > foo
780 778 $ hg -q commit -A -m initial0
781 779 $ hg -q up -r null
782 780 $ echo initial2 > foo
783 781 $ hg -q commit -A -m initial1
784 782 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
785 783 1 new obsolescence markers
786 784 obsoleted 1 changesets
787 785 $ cd ..
788 786
789 787 $ hg -q clone --pull source1a source1b
790 788 $ cd source1a
791 789 $ hg bookmark bookA
792 790 $ echo 1a > foo
793 791 $ hg commit -m 1a
794 792 $ cd ../source1b
795 793 $ hg -q up -r 0
796 794 $ echo head1 > foo
797 795 $ hg commit -m head1
798 796 created new head
799 797 $ hg bookmark head1
800 798 $ hg -q up -r 0
801 799 $ echo head2 > foo
802 800 $ hg commit -m head2
803 801 created new head
804 802 $ hg bookmark head2
805 803 $ hg -q up -r 0
806 804 $ hg branch branch1
807 805 marked working directory as branch branch1
808 806 (branches are permanent and global, did you want a bookmark?)
809 807 $ echo branch1 > foo
810 808 $ hg commit -m branch1
811 809 $ hg -q up -r 0
812 810 $ hg branch branch2
813 811 marked working directory as branch branch2
814 812 $ echo branch2 > foo
815 813 $ hg commit -m branch2
816 814 $ cd ..
817 815 $ hg init source2
818 816 $ cd source2
819 817 $ echo initial2 > foo
820 818 $ hg -q commit -A -m initial2
821 819 $ echo second > foo
822 820 $ hg commit -m second
823 821 $ cd ..
824 822
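The clones below exercise the share extension's pooled-storage mode. The
knobs involved, sketched as an illustrative hgrc (the paths here are
assumptions; the tests pass share.pool via --config instead):

    [share]
    pool = /path/to/pool    # clones park their stores under this directory
    poolnaming = identity   # pool key: root changeset id (default);
                            # 'remote' keys on the clone source instead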
825 823 Clone with auto share from an empty repo should not result in share
826 824
827 825 $ mkdir share
828 826 $ hg --config share.pool=share clone empty share-empty
829 827 (not using pooled storage: remote appears to be empty)
830 828 updating to branch default
831 829 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
832 830 $ ls share
833 831 $ test -d share-empty/.hg/store
834 832 $ test -f share-empty/.hg/sharedpath
835 833 [1]
836 834
837 835 Clone with auto share from a repo with filtered revision 0 should not result in share
838 836
839 837 $ hg --config share.pool=share clone filteredrev0 share-filtered
840 838 (not using pooled storage: unable to resolve identity of remote)
841 839 requesting all changes
842 840 adding changesets
843 841 adding manifests
844 842 adding file changes
845 843 added 1 changesets with 1 changes to 1 files
846 844 new changesets e082c1832e09
847 845 updating to branch default
848 846 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
849 847
850 848 Clone from repo with content should result in shared store being created
851 849
852 850 $ hg --config share.pool=share clone source1a share-dest1a
853 851 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
854 852 requesting all changes
855 853 adding changesets
856 854 adding manifests
857 855 adding file changes
858 856 added 3 changesets with 3 changes to 1 files
859 857 new changesets b5f04eac9d8f:e5bfe23c0b47
860 858 searching for changes
861 859 no changes found
862 860 adding remote bookmark bookA
863 861 updating working directory
864 862 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
865 863
866 864 The shared repo should have been created
867 865
868 866 $ ls share
869 867 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
870 868
871 869 The destination should point to it
872 870
873 871 $ cat share-dest1a/.hg/sharedpath; echo
874 872 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
875 873
876 874 The destination should have bookmarks
877 875
878 876 $ hg -R share-dest1a bookmarks
879 877 bookA 2:e5bfe23c0b47
880 878
881 879 The default path should be the remote, not the share
882 880
883 881 $ hg -R share-dest1a config paths.default
884 882 $TESTTMP/source1a
885 883
886 884 Clone with existing share dir should result in pull + share
887 885
888 886 $ hg --config share.pool=share clone source1b share-dest1b
889 887 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
890 888 searching for changes
891 889 adding changesets
892 890 adding manifests
893 891 adding file changes
894 892 adding remote bookmark head1
895 893 adding remote bookmark head2
896 894 added 4 changesets with 4 changes to 1 files (+4 heads)
897 895 new changesets 4a8dc1ab4c13:6bacf4683960
898 896 updating working directory
899 897 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
900 898
901 899 $ ls share
902 900 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
903 901
904 902 $ cat share-dest1b/.hg/sharedpath; echo
905 903 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
906 904
907 905 We only get bookmarks from the remote, not everything in the share
908 906
909 907 $ hg -R share-dest1b bookmarks
910 908 head1 3:4a8dc1ab4c13
911 909 head2 4:99f71071f117
912 910
913 911 Default path should be source, not share.
914 912
915 913 $ hg -R share-dest1b config paths.default
916 914 $TESTTMP/source1b
917 915
918 916 Checked out revision should be head of default branch
919 917
920 918 $ hg -R share-dest1b log -r .
921 919 changeset: 4:99f71071f117
922 920 bookmark: head2
923 921 parent: 0:b5f04eac9d8f
924 922 user: test
925 923 date: Thu Jan 01 00:00:00 1970 +0000
926 924 summary: head2
927 925
928 926
929 927 Clone from unrelated repo should result in new share
930 928
931 929 $ hg --config share.pool=share clone source2 share-dest2
932 930 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
933 931 requesting all changes
934 932 adding changesets
935 933 adding manifests
936 934 adding file changes
937 935 added 2 changesets with 2 changes to 1 files
938 936 new changesets 22aeff664783:63cf6c3dba4a
939 937 searching for changes
940 938 no changes found
941 939 updating working directory
942 940 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
943 941
944 942 $ ls share
945 943 22aeff664783fd44c6d9b435618173c118c3448e
946 944 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
947 945
948 946 remote naming mode works as advertised
949 947
950 948 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
951 949 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
952 950 requesting all changes
953 951 adding changesets
954 952 adding manifests
955 953 adding file changes
956 954 added 3 changesets with 3 changes to 1 files
957 955 new changesets b5f04eac9d8f:e5bfe23c0b47
958 956 searching for changes
959 957 no changes found
960 958 adding remote bookmark bookA
961 959 updating working directory
962 960 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
963 961
964 962 $ ls shareremote
965 963 195bb1fcdb595c14a6c13e0269129ed78f6debde
966 964
967 965 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
968 966 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
969 967 requesting all changes
970 968 adding changesets
971 969 adding manifests
972 970 adding file changes
973 971 added 6 changesets with 6 changes to 1 files (+4 heads)
974 972 new changesets b5f04eac9d8f:6bacf4683960
975 973 searching for changes
976 974 no changes found
977 975 adding remote bookmark head1
978 976 adding remote bookmark head2
979 977 updating working directory
980 978 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
981 979
982 980 $ ls shareremote
983 981 195bb1fcdb595c14a6c13e0269129ed78f6debde
984 982 c0d4f83847ca2a873741feb7048a45085fd47c46
985 983
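The directory names above show how the pool key is derived: the default
'identity' naming uses the repository's root changeset (b5f04eac9d8f... for
both source1a and source1b, which share revision 0), while 'remote' naming
hashes the clone source, so the two sources land in separate pool entries.
A sketch of that derivation (illustrative; SHA-1 over the raw source path
is an assumption):

    import hashlib

    def poolkey(naming, root_node_hex, source):
        # source is the raw clone source as bytes, e.g. b'.../source1a'
        if naming == 'remote':
            return hashlib.sha1(source).hexdigest()
        return root_node_hex  # 'identity' (default): root changeset id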
986 984 request to clone a single revision is respected in sharing mode
987 985
988 986 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
989 987 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
990 988 adding changesets
991 989 adding manifests
992 990 adding file changes
993 991 added 2 changesets with 2 changes to 1 files
994 992 new changesets b5f04eac9d8f:4a8dc1ab4c13
995 993 no changes found
996 994 adding remote bookmark head1
997 995 updating working directory
998 996 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
999 997
1000 998 $ hg -R share-1arev log -G
1001 999 @ changeset: 1:4a8dc1ab4c13
1002 1000 | bookmark: head1
1003 1001 | tag: tip
1004 1002 | user: test
1005 1003 | date: Thu Jan 01 00:00:00 1970 +0000
1006 1004 | summary: head1
1007 1005 |
1008 1006 o changeset: 0:b5f04eac9d8f
1009 1007 user: test
1010 1008 date: Thu Jan 01 00:00:00 1970 +0000
1011 1009 summary: initial
1012 1010
1013 1011
1014 1012 making another clone should only pull down requested rev
1015 1013
1016 1014 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
1017 1015 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1018 1016 searching for changes
1019 1017 adding changesets
1020 1018 adding manifests
1021 1019 adding file changes
1022 1020 adding remote bookmark head1
1023 1021 adding remote bookmark head2
1024 1022 added 1 changesets with 1 changes to 1 files (+1 heads)
1025 1023 new changesets 99f71071f117
1026 1024 updating working directory
1027 1025 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1028 1026
1029 1027 $ hg -R share-1brev log -G
1030 1028 @ changeset: 2:99f71071f117
1031 1029 | bookmark: head2
1032 1030 | tag: tip
1033 1031 | parent: 0:b5f04eac9d8f
1034 1032 | user: test
1035 1033 | date: Thu Jan 01 00:00:00 1970 +0000
1036 1034 | summary: head2
1037 1035 |
1038 1036 | o changeset: 1:4a8dc1ab4c13
1039 1037 |/ bookmark: head1
1040 1038 | user: test
1041 1039 | date: Thu Jan 01 00:00:00 1970 +0000
1042 1040 | summary: head1
1043 1041 |
1044 1042 o changeset: 0:b5f04eac9d8f
1045 1043 user: test
1046 1044 date: Thu Jan 01 00:00:00 1970 +0000
1047 1045 summary: initial
1048 1046
1049 1047
1050 1048 Request to clone a single branch is respected in sharing mode
1051 1049
1052 1050 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1053 1051 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1054 1052 adding changesets
1055 1053 adding manifests
1056 1054 adding file changes
1057 1055 added 2 changesets with 2 changes to 1 files
1058 1056 new changesets b5f04eac9d8f:5f92a6c1a1b1
1059 1057 no changes found
1060 1058 updating working directory
1061 1059 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1062 1060
1063 1061 $ hg -R share-1bbranch1 log -G
1064 1062 o changeset: 1:5f92a6c1a1b1
1065 1063 | branch: branch1
1066 1064 | tag: tip
1067 1065 | user: test
1068 1066 | date: Thu Jan 01 00:00:00 1970 +0000
1069 1067 | summary: branch1
1070 1068 |
1071 1069 @ changeset: 0:b5f04eac9d8f
1072 1070 user: test
1073 1071 date: Thu Jan 01 00:00:00 1970 +0000
1074 1072 summary: initial
1075 1073
1076 1074
1077 1075 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1078 1076 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1079 1077 searching for changes
1080 1078 adding changesets
1081 1079 adding manifests
1082 1080 adding file changes
1083 1081 added 1 changesets with 1 changes to 1 files (+1 heads)
1084 1082 new changesets 6bacf4683960
1085 1083 updating working directory
1086 1084 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1087 1085
1088 1086 $ hg -R share-1bbranch2 log -G
1089 1087 o changeset: 2:6bacf4683960
1090 1088 | branch: branch2
1091 1089 | tag: tip
1092 1090 | parent: 0:b5f04eac9d8f
1093 1091 | user: test
1094 1092 | date: Thu Jan 01 00:00:00 1970 +0000
1095 1093 | summary: branch2
1096 1094 |
1097 1095 | o changeset: 1:5f92a6c1a1b1
1098 1096 |/ branch: branch1
1099 1097 | user: test
1100 1098 | date: Thu Jan 01 00:00:00 1970 +0000
1101 1099 | summary: branch1
1102 1100 |
1103 1101 @ changeset: 0:b5f04eac9d8f
1104 1102 user: test
1105 1103 date: Thu Jan 01 00:00:00 1970 +0000
1106 1104 summary: initial
1107 1105
1108 1106
1109 1107 -U is respected in share clone mode
1110 1108
1111 1109 $ hg --config share.pool=share clone -U source1a share-1anowc
1112 1110 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1113 1111 searching for changes
1114 1112 no changes found
1115 1113 adding remote bookmark bookA
1116 1114
1117 1115 $ ls -A share-1anowc
1118 1116 .hg
1119 1117
1120 1118 Test that auto sharing doesn't cause failure of "hg clone local remote"
1121 1119
1122 1120 $ cd $TESTTMP
1123 1121 $ hg -R a id -r 0
1124 1122 acb14030fe0a
1125 1123 $ hg id -R remote -r 0
1126 1124 abort: repository remote not found
1127 1125 [255]
1128 1126 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1129 1127 $ hg -R remote id -r 0
1130 1128 acb14030fe0a
1131 1129
1132 1130 Cloning into pooled storage doesn't race (issue5104)
1133 1131
1134 1132 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1135 1133 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1136 1134 $ wait
1137 1135
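The lockdelay extension staggers the two clones around the pool lock so
their critical sections overlap. A rough sketch of such a wrapper
(illustrative only; the real helper lives in $TESTDIR/lockdelay.py):

    import os, time

    def delayedlock(orig, *args, **kwargs):
        # sleep before and after taking the real lock, per the env vars
        time.sleep(float(os.environ.get('HGPRELOCKDELAY', '0')))
        l = orig(*args, **kwargs)
        time.sleep(float(os.environ.get('HGPOSTLOCKDELAY', '0')))
        return l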
1138 1136 $ hg -R share-destrace1 log -r tip
1139 1137 changeset: 2:e5bfe23c0b47
1140 1138 bookmark: bookA
1141 1139 tag: tip
1142 1140 user: test
1143 1141 date: Thu Jan 01 00:00:00 1970 +0000
1144 1142 summary: 1a
1145 1143
1146 1144
1147 1145 $ hg -R share-destrace2 log -r tip
1148 1146 changeset: 2:e5bfe23c0b47
1149 1147 bookmark: bookA
1150 1148 tag: tip
1151 1149 user: test
1152 1150 date: Thu Jan 01 00:00:00 1970 +0000
1153 1151 summary: 1a
1154 1152
1155 1153 One repo should be new, the other should be shared from the pool. We
1156 1154 don't care which is which, so we just make sure we always print the
1157 1155 one containing "new pooled" first, then the one containing "existing
1158 1156 pooled".
1159 1157
1160 1158 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1161 1159 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1162 1160 requesting all changes
1163 1161 adding changesets
1164 1162 adding manifests
1165 1163 adding file changes
1166 1164 added 3 changesets with 3 changes to 1 files
1167 1165 new changesets b5f04eac9d8f:e5bfe23c0b47
1168 1166 searching for changes
1169 1167 no changes found
1170 1168 adding remote bookmark bookA
1171 1169 updating working directory
1172 1170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1173 1171
1174 1172 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1175 1173 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1176 1174 searching for changes
1177 1175 no changes found
1178 1176 adding remote bookmark bookA
1179 1177 updating working directory
1180 1178 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1181 1179
1182 1180 SEC: check for unsafe ssh url
1183 1181
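The 'potentially unsafe url' aborts below come from a guard that refuses
ssh URLs whose host part could be parsed by the ssh binary as an option;
the idea, roughly (a sketch, not Mercurial's exact helper):

    from urllib.parse import unquote

    def checksafessh(url):
        # a "host" beginning with '-' becomes an ssh option, e.g.
        # -oProxyCommand=..., which would run an attacker's command
        if unquote(url).startswith('ssh://-'):
            raise ValueError('potentially unsafe url: %r' % url)

The fakehost|touch variants abort differently because the hostile
characters survive only as a safely quoted hostname.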
1184 1182 $ cat >> $HGRCPATH << EOF
1185 1183 > [ui]
1186 1184 > ssh = sh -c "read l; read l; read l"
1187 1185 > EOF
1188 1186
1189 1187 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1190 1188 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1191 1189 [255]
1192 1190 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1193 1191 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1194 1192 [255]
1195 1193 $ hg clone 'ssh://fakehost|touch%20owned/path'
1196 1194 abort: no suitable response from remote hg
1197 1195 [255]
1198 1196 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1199 1197 abort: no suitable response from remote hg
1200 1198 [255]
1201 1199
1202 1200 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1203 1201 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1204 1202 [255]
1205 1203
1206 1204 #if windows
1207 1205 $ hg clone "ssh://%26touch%20owned%20/" --debug
1208 1206 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1209 1207 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1210 1208 sending hello command
1211 1209 sending between command
1212 1210 abort: no suitable response from remote hg
1213 1211 [255]
1214 1212 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1215 1213 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1216 1214 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1217 1215 sending hello command
1218 1216 sending between command
1219 1217 abort: no suitable response from remote hg
1220 1218 [255]
1221 1219 #else
1222 1220 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1223 1221 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1224 1222 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1225 1223 sending hello command
1226 1224 sending between command
1227 1225 abort: no suitable response from remote hg
1228 1226 [255]
1229 1227 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1230 1228 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1231 1229 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1232 1230 sending hello command
1233 1231 sending between command
1234 1232 abort: no suitable response from remote hg
1235 1233 [255]
1236 1234 #endif
1237 1235
1238 1236 $ hg clone "ssh://v-alid.example.com/" --debug
1239 1237 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1240 1238 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1241 1239 sending hello command
1242 1240 sending between command
1243 1241 abort: no suitable response from remote hg
1244 1242 [255]
1245 1243
1246 1244 We should not have created a file named owned - if it exists, the
1247 1245 attack succeeded.
1248 1246 $ if test -f owned; then echo 'you got owned'; fi
1249 1247
1250 1248 Cloning without fsmonitor enabled does not print a warning for small repos
1251 1249
1252 1250 $ hg clone a fsmonitor-default
1253 1251 updating to bookmark @ on branch stable
1254 1252 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1255 1253
1256 1254 Lower the warning threshold to simulate a large repo
1257 1255
1258 1256 $ cat >> $HGRCPATH << EOF
1259 1257 > [fsmonitor]
1260 1258 > warn_update_file_count = 2
1261 1259 > warn_update_file_count_rust = 2
1262 1260 > EOF
1263 1261
1264 1262 We should see a warning about no fsmonitor on supported platforms
1265 1263
1266 1264 #if linuxormacos no-fsmonitor
1267 1265 $ hg clone a nofsmonitor
1268 1266 updating to bookmark @ on branch stable
1269 1267 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1270 1268 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1271 1269 #else
1272 1270 $ hg clone a nofsmonitor
1273 1271 updating to bookmark @ on branch stable
1274 1272 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1275 1273 #endif
1276 1274
1277 1275 We should not see warning about fsmonitor when it is enabled
1278 1276
1279 1277 #if fsmonitor
1280 1278 $ hg clone a fsmonitor-enabled
1281 1279 updating to bookmark @ on branch stable
1282 1280 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1283 1281 #endif
1284 1282
1285 1283 We can disable the fsmonitor warning
1286 1284
1287 1285 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1288 1286 updating to bookmark @ on branch stable
1289 1287 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1290 1288
1291 1289 Loaded fsmonitor but disabled in config should still print warning
1292 1290
1293 1291 #if linuxormacos fsmonitor
1294 1292 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1295 1293 updating to bookmark @ on branch stable
1296 1294 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1297 1295 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1298 1296 #endif
1299 1297
1300 1298 Warning not printed if working directory isn't empty
1301 1299
1302 1300 $ hg -q clone a fsmonitor-update
1303 1301 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1304 1302 $ cd fsmonitor-update
1305 1303 $ hg up acb14030fe0a
1306 1304 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1307 1305 (leaving bookmark @)
1308 1306 $ hg up cf0fe1914066
1309 1307 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1310 1308
1311 1309 `hg update` from null revision also prints
1312 1310
1313 1311 $ hg up null
1314 1312 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1315 1313
1316 1314 #if linuxormacos no-fsmonitor
1317 1315 $ hg up cf0fe1914066
1318 1316 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1319 1317 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1320 1318 #else
1321 1319 $ hg up cf0fe1914066
1322 1320 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1323 1321 #endif
1324 1322
1325 1323 $ cd ..
1326 1324
@@ -1,447 +1,445 b''
1 1 #require hardlink reporevlogstore
2 2
3 3 $ cat > nlinks.py <<EOF
4 4 > from __future__ import print_function
5 5 > import sys
6 6 > from mercurial import pycompat, util
7 7 > for f in sorted(sys.stdin.readlines()):
8 8 > f = f[:-1]
9 9 > print(util.nlinks(pycompat.fsencode(f)), f)
10 10 > EOF
11 11
12 12 $ nlinksdir()
13 13 > {
14 14 > find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py
15 15 > }
16 16
17 17 Some implementations of cp can't create hardlinks, so define a helper (a replacement for 'cp -al' on Linux):
18 18
19 19 $ cat > linkcp.py <<EOF
20 20 > from __future__ import absolute_import
21 21 > import sys
22 22 > from mercurial import pycompat, util
23 23 > util.copyfiles(pycompat.fsencode(sys.argv[1]),
24 24 > pycompat.fsencode(sys.argv[2]), hardlink=True)
25 25 > EOF
26 26
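util.copyfiles with hardlink=True links each file when the filesystem
allows it and falls back to copying otherwise; the gist (a sketch, not the
actual Mercurial implementation):

    import os, shutil

    def link_or_copy(src, dst):
        try:
            os.link(src, dst)        # hard link: same inode, no data copied
        except OSError:
            shutil.copy2(src, dst)   # cross-device or unsupported: copy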
27 27 $ linkcp()
28 28 > {
29 29 > "$PYTHON" $TESTTMP/linkcp.py $1 $2
30 30 > }
31 31
32 32 Prepare repo r1:
33 33
34 34 $ hg init r1
35 35 $ cd r1
36 36
37 37 $ echo c1 > f1
38 38 $ hg add f1
39 39 $ hg ci -m0
40 40
41 41 $ mkdir d1
42 42 $ cd d1
43 43 $ echo c2 > f2
44 44 $ hg add f2
45 45 $ hg ci -m1
46 46 $ cd ../..
47 47
48 48 $ nlinksdir r1/.hg/store
49 49 1 r1/.hg/store/00changelog.i
50 50 1 r1/.hg/store/00manifest.i
51 51 1 r1/.hg/store/data/d1/f2.i
52 52 1 r1/.hg/store/data/f1.i
53 53 1 r1/.hg/store/fncache (repofncache !)
54 54 1 r1/.hg/store/phaseroots
55 55 1 r1/.hg/store/undo
56 56 1 r1/.hg/store/undo.backup.fncache (repofncache !)
57 57 1 r1/.hg/store/undo.backupfiles
58 58 1 r1/.hg/store/undo.phaseroots
59 59
60 60
61 61 Create hardlinked clone r2:
62 62
63 63 $ hg clone -U --debug r1 r2 --config progress.debug=true
64 64 linking: 1 files
65 65 linking: 2 files
66 66 linking: 3 files
67 67 linking: 4 files
68 68 linking: 5 files
69 69 linking: 6 files
70 70 linking: 7 files
71 71 linked 7 files
72 72 updating the branch cache
73 73
74 74 Create non-hardlinked clone r3:
75 75
76 76 $ hg clone --pull r1 r3
77 77 requesting all changes
78 78 adding changesets
79 79 adding manifests
80 80 adding file changes
81 81 added 2 changesets with 2 changes to 2 files
82 82 new changesets 40d85e9847f2:7069c422939c
83 83 updating to branch default
84 84 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
85 85
86 86
87 87 Repos r1 and r2 should now contain hardlinked files:
88 88
89 89 $ nlinksdir r1/.hg/store
90 90 2 r1/.hg/store/00changelog.i
91 91 2 r1/.hg/store/00manifest.i
92 92 2 r1/.hg/store/data/d1/f2.i
93 93 2 r1/.hg/store/data/f1.i
94 94 2 r1/.hg/store/fncache (repofncache !)
95 95 1 r1/.hg/store/phaseroots
96 96 1 r1/.hg/store/undo
97 97 1 r1/.hg/store/undo.backup.fncache (repofncache !)
98 98 1 r1/.hg/store/undo.backupfiles
99 99 1 r1/.hg/store/undo.phaseroots
100 100
101 101 $ nlinksdir r2/.hg/store
102 102 2 r2/.hg/store/00changelog.i
103 103 2 r2/.hg/store/00manifest.i
104 104 2 r2/.hg/store/data/d1/f2.i
105 105 2 r2/.hg/store/data/f1.i
106 106 2 r2/.hg/store/fncache (repofncache !)
107 107
108 108 Repo r3 should not be hardlinked:
109 109
110 110 $ nlinksdir r3/.hg/store
111 111 1 r3/.hg/store/00changelog.i
112 112 1 r3/.hg/store/00manifest.i
113 113 1 r3/.hg/store/data/d1/f2.i
114 114 1 r3/.hg/store/data/f1.i
115 115 1 r3/.hg/store/fncache (repofncache !)
116 116 1 r3/.hg/store/phaseroots
117 117 1 r3/.hg/store/undo
118 118 1 r3/.hg/store/undo.backupfiles
119 119 1 r3/.hg/store/undo.phaseroots
120 120
121 121
122 122 Create a non-inlined filelog in r3:
123 123
124 124 $ cd r3/d1
125 125 >>> f = open('data1', 'wb')
126 126 >>> for x in range(10000):
127 127 ... f.write(b"%d\n" % x) and None
128 128 >>> f.close()
129 129 $ for j in 0 1 2 3 4 5 6 7 8 9; do
130 130 > cat data1 >> f2
131 131 > hg commit -m$j
132 132 > done
133 133 $ cd ../..
134 134
135 135 $ nlinksdir r3/.hg/store
136 136 1 r3/.hg/store/00changelog.i
137 137 1 r3/.hg/store/00manifest.i
138 138 1 r3/.hg/store/data/d1/f2.d
139 139 1 r3/.hg/store/data/d1/f2.i
140 140 1 r3/.hg/store/data/f1.i
141 141 1 r3/.hg/store/fncache (repofncache !)
142 142 1 r3/.hg/store/phaseroots
143 143 1 r3/.hg/store/undo
144 144 1 r3/.hg/store/undo.backup.fncache (repofncache !)
145 145 1 r3/.hg/store/undo.backup.phaseroots
146 146 1 r3/.hg/store/undo.backupfiles
147 147 1 r3/.hg/store/undo.phaseroots
148 148
149 149 Push to repo r1 should break up most hardlinks in r2:
150 150
151 151 $ hg -R r2 verify
152 152 checking changesets
153 153 checking manifests
154 154 crosschecking files in changesets and manifests
155 155 checking files
156 156 checked 2 changesets with 2 changes to 2 files
157 157
158 158 $ cd r3
159 159 $ hg push
160 160 pushing to $TESTTMP/r1
161 161 searching for changes
162 162 adding changesets
163 163 adding manifests
164 164 adding file changes
165 165 added 10 changesets with 10 changes to 1 files
166 166
167 167 $ cd ..
168 168
169 169 $ nlinksdir r2/.hg/store
170 170 1 r2/.hg/store/00changelog.i
171 171 1 r2/.hg/store/00manifest.i
172 172 1 r2/.hg/store/data/d1/f2.i
173 173 2 r2/.hg/store/data/f1.i
174 174 [12] r2/\.hg/store/fncache (re) (repofncache !)
175 175
176 176 #if hardlink-whitelisted repofncache
177 177 $ nlinksdir r2/.hg/store/fncache
178 178 2 r2/.hg/store/fncache
179 179 #endif
180 180
181 181 $ hg -R r2 verify
182 182 checking changesets
183 183 checking manifests
184 184 crosschecking files in changesets and manifests
185 185 checking files
186 186 checked 2 changesets with 2 changes to 2 files
187 187
188 188
189 189 $ cd r1
190 190 $ hg up
191 191 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
192 192
193 193 Committing a change to f1 in r1 must break up the hardlink on f1.i in r2:
194 194
195 195 $ echo c1c1 >> f1
196 196 $ hg ci -m00
197 197 $ cd ..
198 198
199 199 $ nlinksdir r2/.hg/store
200 200 1 r2/.hg/store/00changelog.i
201 201 1 r2/.hg/store/00manifest.i
202 202 1 r2/.hg/store/data/d1/f2.i
203 203 1 r2/.hg/store/data/f1.i
204 204 [12] r2/\.hg/store/fncache (re) (repofncache !)
205 205
206 206 #if hardlink-whitelisted repofncache
207 207 $ nlinksdir r2/.hg/store/fncache
208 208 2 r2/.hg/store/fncache
209 209 #endif
210 210
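Why the links break: revlog files are append-only, and before writing
Mercurial gives itself a private copy of any store file whose link count
exceeds one, leaving the other clone on the old inode. Conceptually (a
sketch, not the vfs code):

    import os, shutil

    def break_hardlink_before_write(path):
        # manual copy-on-write: detach from the shared inode first
        if os.lstat(path).st_nlink > 1:
            tmp = path + '.tmp'
            shutil.copy2(path, tmp)
            os.replace(tmp, path)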
211 211 Create a file whose exec permissions we will change
212 212 $ cd r3
213 213 $ echo "echo hello world" > f3
214 214 $ hg add f3
215 215 $ hg ci -mf3
216 216 $ cd ..
217 217
218 218 $ cd r3
219 219 $ hg tip --template '{rev}:{node|short}\n'
220 220 12:d3b77733a28a
221 221 $ echo bla > f1
222 222 $ chmod +x f3
223 223 $ hg ci -m1
224 224 $ cd ..
225 225
226 226 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
227 227
228 228 $ linkcp r3 r4
229 229
230 230 'checklink' is produced by hardlinking a symlink, and it is undefined whether
231 231 the symlink itself or its target gets linked; the behavior differs between
232 232 Linux and BSD. Just remove the file so the test passes on both platforms.
233 233
234 234 $ rm -f r4/.hg/wcache/checklink
235 235
236 236 r4 has hardlinks in the working dir (not just inside .hg):
237 237
238 238 $ nlinksdir r4
239 239 2 r4/.hg/00changelog.i
240 240 2 r4/.hg/branch
241 241 2 r4/.hg/cache/branch2-base
242 242 2 r4/.hg/cache/branch2-immutable
243 243 2 r4/.hg/cache/branch2-served
244 244 2 r4/.hg/cache/branch2-served.hidden
245 245 2 r4/.hg/cache/branch2-visible
246 246 2 r4/.hg/cache/branch2-visible-hidden
247 2 r4/.hg/cache/hgtagsfnodes1
248 247 2 r4/.hg/cache/rbc-names-v1
249 248 2 r4/.hg/cache/rbc-revs-v1
250 249 2 r4/.hg/cache/tags2
251 250 2 r4/.hg/cache/tags2-served
252 251 2 r4/.hg/dirstate
253 252 2 r4/.hg/fsmonitor.state (fsmonitor !)
254 253 2 r4/.hg/hgrc
255 254 2 r4/.hg/last-message.txt
256 255 2 r4/.hg/requires
257 256 2 r4/.hg/store/00changelog.i
258 257 2 r4/.hg/store/00manifest.i
259 258 2 r4/.hg/store/data/d1/f2.d
260 259 2 r4/.hg/store/data/d1/f2.i
261 260 2 r4/.hg/store/data/f1.i
262 261 2 r4/.hg/store/data/f3.i
263 262 2 r4/.hg/store/fncache (repofncache !)
264 263 2 r4/.hg/store/phaseroots
265 264 2 r4/.hg/store/undo
266 265 2 r4/.hg/store/undo.backup.fncache (repofncache !)
267 266 2 r4/.hg/store/undo.backup.phaseroots
268 267 2 r4/.hg/store/undo.backupfiles
269 268 2 r4/.hg/store/undo.phaseroots
270 269 [24] r4/\.hg/undo\.backup\.dirstate (re)
271 270 2 r4/.hg/undo.bookmarks
272 271 2 r4/.hg/undo.branch
273 272 2 r4/.hg/undo.desc
274 273 [24] r4/\.hg/undo\.dirstate (re)
275 274 2 r4/.hg/wcache/checkisexec (execbit !)
276 275 2 r4/.hg/wcache/checklink-target (symlink !)
277 276 2 r4/.hg/wcache/checknoexec (execbit !)
278 277 2 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
279 278 2 r4/d1/data1
280 279 2 r4/d1/f2
281 280 2 r4/f1
282 281 2 r4/f3
283 282
284 283 Update back to revision 12 in r4 should break the hardlinks of files f1 and f3:
285 284 #if hardlink-whitelisted
286 285 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
287 286 4 r4/.hg/undo.backup.dirstate
288 287 4 r4/.hg/undo.dirstate
289 288 #endif
290 289
291 290
292 291 $ hg -R r4 up 12
293 292 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (execbit !)
294 293 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-execbit !)
295 294
296 295 $ nlinksdir r4
297 296 2 r4/.hg/00changelog.i
298 297 1 r4/.hg/branch
299 298 2 r4/.hg/cache/branch2-base
300 299 2 r4/.hg/cache/branch2-immutable
301 300 2 r4/.hg/cache/branch2-served
302 301 2 r4/.hg/cache/branch2-served.hidden
303 302 2 r4/.hg/cache/branch2-visible
304 303 2 r4/.hg/cache/branch2-visible-hidden
305 2 r4/.hg/cache/hgtagsfnodes1
306 304 2 r4/.hg/cache/rbc-names-v1
307 305 2 r4/.hg/cache/rbc-revs-v1
308 306 2 r4/.hg/cache/tags2
309 307 2 r4/.hg/cache/tags2-served
310 308 1 r4/.hg/dirstate
311 309 1 r4/.hg/fsmonitor.state (fsmonitor !)
312 310 2 r4/.hg/hgrc
313 311 2 r4/.hg/last-message.txt
314 312 2 r4/.hg/requires
315 313 2 r4/.hg/store/00changelog.i
316 314 2 r4/.hg/store/00manifest.i
317 315 2 r4/.hg/store/data/d1/f2.d
318 316 2 r4/.hg/store/data/d1/f2.i
319 317 2 r4/.hg/store/data/f1.i
320 318 2 r4/.hg/store/data/f3.i
321 319 2 r4/.hg/store/fncache
322 320 2 r4/.hg/store/phaseroots
323 321 2 r4/.hg/store/undo
324 322 2 r4/.hg/store/undo.backup.fncache (repofncache !)
325 323 2 r4/.hg/store/undo.backup.phaseroots
326 324 2 r4/.hg/store/undo.backupfiles
327 325 2 r4/.hg/store/undo.phaseroots
328 326 [24] r4/\.hg/undo\.backup\.dirstate (re)
329 327 2 r4/.hg/undo.bookmarks
330 328 2 r4/.hg/undo.branch
331 329 2 r4/.hg/undo.desc
332 330 [24] r4/\.hg/undo\.dirstate (re)
333 331 2 r4/.hg/wcache/checkisexec (execbit !)
334 332 2 r4/.hg/wcache/checklink-target (symlink !)
335 333 2 r4/.hg/wcache/checknoexec (execbit !)
336 334 1 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
337 335 2 r4/d1/data1
338 336 2 r4/d1/f2
339 337 1 r4/f1
340 338 1 r4/f3 (execbit !)
341 339 2 r4/f3 (no-execbit !)
342 340
343 341 #if hardlink-whitelisted
344 342 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
345 343 4 r4/.hg/undo.backup.dirstate
346 344 4 r4/.hg/undo.dirstate
347 345 #endif
348 346
349 347 Test hardlinking outside hg:
350 348
351 349 $ mkdir x
352 350 $ echo foo > x/a
353 351
354 352 $ linkcp x y
355 353 $ echo bar >> y/a
356 354
357 355 No diff if hardlink:
358 356
359 357 $ diff x/a y/a
360 358
361 359 Test mq hardlinking:
362 360
363 361 $ echo "[extensions]" >> $HGRCPATH
364 362 $ echo "mq=" >> $HGRCPATH
365 363
366 364 $ hg init a
367 365 $ cd a
368 366
369 367 $ hg qimport -n foo - << EOF
370 368 > # HG changeset patch
371 369 > # Date 1 0
372 370 > diff -r 2588a8b53d66 a
373 371 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
374 372 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
375 373 > @@ -0,0 +1,1 @@
376 374 > +a
377 375 > EOF
378 376 adding foo to series file
379 377
380 378 $ hg qpush
381 379 applying foo
382 380 now at: foo
383 381
384 382 $ cd ..
385 383 $ linkcp a b
386 384 $ cd b
387 385
388 386 $ hg qimport -n bar - << EOF
389 387 > # HG changeset patch
390 388 > # Date 2 0
391 389 > diff -r 2588a8b53d66 a
392 390 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
393 391 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
394 392 > @@ -0,0 +1,1 @@
395 393 > +b
396 394 > EOF
397 395 adding bar to series file
398 396
399 397 $ hg qpush
400 398 applying bar
401 399 now at: bar
402 400
403 401 $ cat .hg/patches/status
404 402 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
405 403 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
406 404
407 405 $ cat .hg/patches/series
408 406 foo
409 407 bar
410 408
411 409 $ cat ../a/.hg/patches/status
412 410 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
413 411
414 412 $ cat ../a/.hg/patches/series
415 413 foo
416 414
417 415 Test tags hardlinking:
418 416
419 417 $ hg qdel -r qbase:qtip
420 418 patch foo finalized without changeset message
421 419 patch bar finalized without changeset message
422 420
423 421 $ hg tag -l lfoo
424 422 $ hg tag foo
425 423
426 424 $ cd ..
427 425 $ linkcp b c
428 426 $ cd c
429 427
430 428 $ hg tag -l -r 0 lbar
431 429 $ hg tag -r 0 bar
432 430
433 431 $ cat .hgtags
434 432 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
435 433 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
436 434
437 435 $ cat .hg/localtags
438 436 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
439 437 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
440 438
441 439 $ cat ../b/.hgtags
442 440 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
443 441
444 442 $ cat ../b/.hg/localtags
445 443 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
446 444
447 445 $ cd ..
@@ -1,287 +1,286 b''
1 1 #testcases safe normal
2 2
3 3 #if safe
4 4 $ echo "[format]" >> $HGRCPATH
5 5 $ echo "use-share-safe = True" >> $HGRCPATH
6 6 #endif
7 7
8 8 $ echo "[extensions]" >> $HGRCPATH
9 9 $ echo "share = " >> $HGRCPATH
10 10
11 11 prepare repo1
12 12
13 13 $ hg init repo1
14 14 $ cd repo1
15 15 $ echo a > a
16 16 $ hg commit -A -m'init'
17 17 adding a
18 18
19 19 share it
20 20
21 21 $ cd ..
22 22 $ hg share repo1 repo2
23 23 updating working directory
24 24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
25 25
26 26 share shouldn't have a store dir
27 27
28 28 $ cd repo2
29 29 $ test -d .hg/store
30 30 [1]
31 31 $ hg root -Tjson | sed 's|\\\\|\\|g'
32 32 [
33 33 {
34 34 "hgpath": "$TESTTMP/repo2/.hg",
35 35 "reporoot": "$TESTTMP/repo2",
36 36 "storepath": "$TESTTMP/repo1/.hg/store"
37 37 }
38 38 ]
39 39
40 40 share shouldn't have a full cache dir, original repo should
41 41
42 42 $ hg branches
43 43 default 0:d3873e73d99e
44 44 $ hg tags
45 45 tip 0:d3873e73d99e
46 46 $ test -d .hg/cache
47 47 [1]
48 48 $ ls -1 .hg/wcache || true
49 49 checkisexec (execbit !)
50 50 checklink (symlink !)
51 51 checklink-target (symlink !)
52 52 manifestfulltextcache (reporevlogstore !)
53 53 $ ls -1 ../repo1/.hg/cache
54 54 branch2-served
55 55 rbc-names-v1
56 56 rbc-revs-v1
57 57 tags2-visible
58 58
59 59 Cloning a shared repo should pick up the full cache dir on the other hand.
60 60
61 61 $ hg clone . ../repo2-clone
62 62 updating to branch default
63 63 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
64 64 $ ls -1 ../repo2-clone/.hg/cache
65 65 branch2-base
66 66 branch2-immutable
67 67 branch2-served
68 68 branch2-served.hidden
69 69 branch2-visible
70 70 branch2-visible-hidden
71 hgtagsfnodes1
72 71 rbc-names-v1
73 72 rbc-revs-v1
74 73 tags2
75 74 tags2-served
76 75 tags2-visible
77 76
78 77 Some sed versions append a newline, some don't, and some just fail
79 78
80 79 $ cat .hg/sharedpath; echo
81 80 $TESTTMP/repo1/.hg
82 81
83 82 trailing newline on .hg/sharedpath is ok
84 83 $ hg tip -q
85 84 0:d3873e73d99e
86 85 $ echo '' >> .hg/sharedpath
87 86 $ cat .hg/sharedpath
88 87 $TESTTMP/repo1/.hg
89 88 $ hg tip -q
90 89 0:d3873e73d99e
91 90
92 91 commit in shared clone
93 92
94 93 $ echo a >> a
95 94 $ hg commit -m'change in shared clone'
96 95
97 96 check original
98 97
99 98 $ cd ../repo1
100 99 $ hg log
101 100 changeset: 1:8af4dc49db9e
102 101 tag: tip
103 102 user: test
104 103 date: Thu Jan 01 00:00:00 1970 +0000
105 104 summary: change in shared clone
106 105
107 106 changeset: 0:d3873e73d99e
108 107 user: test
109 108 date: Thu Jan 01 00:00:00 1970 +0000
110 109 summary: init
111 110
112 111 $ hg update
113 112 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
114 113 $ cat a # should be two lines of "a"
115 114 a
116 115 a
117 116
118 117 commit in original
119 118
120 119 $ echo b > b
121 120 $ hg commit -A -m'another file'
122 121 adding b
123 122
124 123 check in shared clone
125 124
126 125 $ cd ../repo2
127 126 $ hg log
128 127 changeset: 2:c2e0ac586386
129 128 tag: tip
130 129 user: test
131 130 date: Thu Jan 01 00:00:00 1970 +0000
132 131 summary: another file
133 132
134 133 changeset: 1:8af4dc49db9e
135 134 user: test
136 135 date: Thu Jan 01 00:00:00 1970 +0000
137 136 summary: change in shared clone
138 137
139 138 changeset: 0:d3873e73d99e
140 139 user: test
141 140 date: Thu Jan 01 00:00:00 1970 +0000
142 141 summary: init
143 142
144 143 $ hg update
145 144 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
146 145 $ cat b # should exist with one "b"
147 146 b
148 147
149 148 hg serve shared clone
150 149
151 150 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid
152 151 $ cat hg.pid >> $DAEMON_PIDS
153 152 $ get-with-headers.py localhost:$HGPORT 'raw-file/'
154 153 200 Script output follows
155 154
156 155
157 156 -rw-r--r-- 4 a
158 157 -rw-r--r-- 2 b
159 158
160 159
161 160 Cloning a shared repo via bundle2 results in a non-shared clone
162 161
163 162 $ cd ..
164 163 $ hg clone -q --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
165 164 $ cat ./cloned-via-bundle2/.hg/requires | grep "shared"
166 165 [1]
167 166 $ hg id --cwd cloned-via-bundle2 -r tip
168 167 c2e0ac586386 tip
169 168 $ cd repo2
170 169
171 170 test unshare command
172 171
173 172 $ hg unshare
174 173 $ test -d .hg/store
175 174 $ test -f .hg/sharedpath
176 175 [1]
177 176 $ grep shared .hg/requires
178 177 [1]
179 178 $ hg unshare
180 179 abort: this is not a shared repo
181 180 [255]
182 181
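hg unshare materializes a private store and drops the shared pointer;
conceptually (a loose sketch; the real command also rewrites .hg/requires
and takes the appropriate locks):

    import os, shutil

    def unshare_sketch(hgdir):
        src_hg = open(os.path.join(hgdir, 'sharedpath')).read().strip()
        # copy the shared store locally, then cut the tie
        shutil.copytree(os.path.join(src_hg, 'store'),
                        os.path.join(hgdir, 'store'))
        os.unlink(os.path.join(hgdir, 'sharedpath'))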
183 182 check that a change does not propagate
184 183
185 184 $ echo b >> b
186 185 $ hg commit -m'change in unshared'
187 186 $ cd ../repo1
188 187 $ hg id -r tip
189 188 c2e0ac586386 tip
190 189
191 190 $ cd ..
192 191
193 192
194 193 non largefiles repos won't enable largefiles
195 194
196 195 $ hg share --config extensions.largefiles= repo2 sharedrepo
197 196 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
198 197 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
199 198 updating working directory
200 199 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
201 200 $ [ -f sharedrepo/.hg/hgrc ]
202 201 [1]
203 202
204 203 test that shared clones using relative paths work
205 204
206 205 $ mkdir thisdir
207 206 $ hg init thisdir/orig
208 207 $ hg share -U thisdir/orig thisdir/abs
209 208 $ hg share -U --relative thisdir/abs thisdir/rel
210 209 $ cat thisdir/rel/.hg/sharedpath
211 210 ../../orig/.hg (no-eol)
212 211 $ grep shared thisdir/*/.hg/requires
213 212 thisdir/abs/.hg/requires:shared
214 213 thisdir/rel/.hg/requires:relshared
215 214 thisdir/rel/.hg/requires:shared
216 215
217 216 test that relative shared paths aren't relative to $PWD
218 217
219 218 $ cd thisdir
220 219 $ hg -R rel root
221 220 $TESTTMP/thisdir/rel
222 221 $ cd ..
223 222
224 223 now test that relative paths really are relative and survive
225 224 across renames and changes of PWD
226 225
227 226 $ hg -R thisdir/abs root
228 227 $TESTTMP/thisdir/abs
229 228 $ hg -R thisdir/rel root
230 229 $TESTTMP/thisdir/rel
231 230 $ mv thisdir thatdir
232 231 $ hg -R thatdir/abs root
233 232 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/thisdir/orig/.hg
234 233 [255]
235 234 $ hg -R thatdir/rel root
236 235 $TESTTMP/thatdir/rel
237 236
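The '../../orig/.hg' stored in sharedpath is resolved against the share's
own .hg directory rather than $PWD, which is why the rename above breaks
the absolute share but leaves the relative one working. A minimal sketch of
that resolution (an illustration of the behavior exercised here, not
Mercurial's actual code):

    import os

    def resolve_sharedpath(repo_root):
        # .hg/sharedpath names the source repository's .hg directory
        hgdir = os.path.join(repo_root, '.hg')
        with open(os.path.join(hgdir, 'sharedpath')) as fp:
            shared = fp.read().rstrip('\n')
        if not os.path.isabs(shared):
            # 'relshared' requirement: resolve relative to the share's
            # .hg dir, so renaming a common parent keeps the link intact
            shared = os.path.normpath(os.path.join(hgdir, shared))
        return shared
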
238 237 test unshare relshared repo
239 238
240 239 $ cd thatdir/rel
241 240 $ hg unshare
242 241 $ test -d .hg/store
243 242 $ test -f .hg/sharedpath
244 243 [1]
245 244 $ grep shared .hg/requires
246 245 [1]
247 246 $ hg unshare
248 247 abort: this is not a shared repo
249 248 [255]
250 249 $ cd ../..
251 250
252 251 $ rm -r thatdir
253 252
254 253 Demonstrate buggy behavior around requirements validation
255 254 See comment in localrepo.py:makelocalrepository() for more.
256 255
257 256 $ hg init sharenewrequires
258 257 $ hg share sharenewrequires shareoldrequires
259 258 updating working directory
260 259 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
261 260
262 261 $ cat >> sharenewrequires/.hg/requires << EOF
263 262 > missing-requirement
264 263 > EOF
265 264
266 265 We cannot open the repo with the unknown requirement
267 266
268 267 $ hg -R sharenewrequires status
269 268 abort: repository requires features unknown to this Mercurial: missing-requirement
270 269 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
271 270 [255]
272 271
273 272 BUG: we don't get the same error when opening the shared repo pointing to it
274 273
275 274 $ hg -R shareoldrequires status
276 275
277 276 Explicitly kill daemons to let the test exit on Windows
278 277
279 278 $ killdaemons.py
280 279
281 280 Test sharing a repository which was created with the store requirement disabled
282 281
283 282 $ hg init nostore --config format.usestore=false
284 283 ignoring enabled 'format.use-share-safe' config because it is incompatible with disabled 'format.usestore' config (safe !)
285 284 $ hg share nostore sharednostore
286 285 abort: cannot create shared repository as source was created with 'format.usestore' config disabled
287 286 [255]
@@ -1,733 +1,733 b''
1 1 #testcases sshv1 sshv2
2 2
3 3 #if sshv2
4 4 $ cat >> $HGRCPATH << EOF
5 5 > [experimental]
6 6 > sshpeer.advertise-v2 = true
7 7 > sshserver.support-v2 = true
8 8 > EOF
9 9 #endif
10 10
11 11 This test tries to exercise the ssh functionality with a dummy script
12 12
13 13 creating 'remote' repo
14 14
15 15 $ hg init remote
16 16 $ cd remote
17 17 $ echo this > foo
18 18 $ echo this > fooO
19 19 $ hg ci -A -m "init" foo fooO
20 20
21 21 insert a closed branch (issue4428)
22 22
23 23 $ hg up null
24 24 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
25 25 $ hg branch closed
26 26 marked working directory as branch closed
27 27 (branches are permanent and global, did you want a bookmark?)
28 28 $ hg ci -mc0
29 29 $ hg ci --close-branch -mc1
30 30 $ hg up -q default
31 31
32 32 configure for serving
33 33
34 34 $ cat <<EOF > .hg/hgrc
35 35 > [server]
36 36 > uncompressed = True
37 37 >
38 38 > [hooks]
39 39 > changegroup = sh -c "printenv.py --line changegroup-in-remote 0 ../dummylog"
40 40 > EOF
41 41 $ cd $TESTTMP
42 42
43 43 repo not found error
44 44
45 45 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
46 46 remote: abort: repository nonexistent not found
47 47 abort: no suitable response from remote hg
48 48 [255]
49 49 $ hg clone -q -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
50 50 remote: abort: repository nonexistent not found
51 51 abort: no suitable response from remote hg
52 52 [255]
53 53
54 54 non-existent absolute path
55 55
56 56 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/nonexistent local
57 57 remote: abort: repository $TESTTMP/nonexistent not found
58 58 abort: no suitable response from remote hg
59 59 [255]
60 60
61 61 clone remote via stream
62 62
63 63 #if no-reposimplestore
64 64
65 65 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
66 66 streaming all changes
67 67 8 files to transfer, 827 bytes of data (no-zstd !)
68 68 transferred 827 bytes in * seconds (*) (glob) (no-zstd !)
69 69 8 files to transfer, 846 bytes of data (zstd !)
70 70 transferred * bytes in * seconds (* */sec) (glob) (zstd !)
71 71 updating to branch default
72 72 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
73 73 $ cd local-stream
74 74 $ hg verify
75 75 checking changesets
76 76 checking manifests
77 77 crosschecking files in changesets and manifests
78 78 checking files
79 79 checked 3 changesets with 2 changes to 2 files
80 80 $ hg branches
81 81 default 0:1160648e36ce
82 82 $ cd $TESTTMP
83 83
84 84 clone bookmarks via stream
85 85
86 86 $ hg -R local-stream book mybook
87 87 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
88 88 streaming all changes
89 16 files to transfer, * of data (glob)
89 15 files to transfer, * of data (glob)
90 90 transferred * in * seconds (*) (glob)
91 91 updating to branch default
92 92 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
93 93 $ cd stream2
94 94 $ hg book
95 95 mybook 0:1160648e36ce
96 96 $ cd $TESTTMP
97 97 $ rm -rf local-stream stream2
98 98
99 99 #endif
100 100
101 101 clone remote via pull
102 102
103 103 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
104 104 requesting all changes
105 105 adding changesets
106 106 adding manifests
107 107 adding file changes
108 108 added 3 changesets with 2 changes to 2 files
109 109 new changesets 1160648e36ce:ad076bfb429d
110 110 updating to branch default
111 111 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
112 112
113 113 verify
114 114
115 115 $ cd local
116 116 $ hg verify
117 117 checking changesets
118 118 checking manifests
119 119 crosschecking files in changesets and manifests
120 120 checking files
121 121 checked 3 changesets with 2 changes to 2 files
122 122 $ cat >> .hg/hgrc <<EOF
123 123 > [hooks]
124 124 > changegroup = sh -c "printenv.py changegroup-in-local 0 ../dummylog"
125 125 > EOF
126 126
127 127 empty default pull
128 128
129 129 $ hg paths
130 130 default = ssh://user@dummy/remote
131 131 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
132 132 pulling from ssh://user@dummy/remote
133 133 searching for changes
134 134 no changes found
135 135
136 136 pull from wrong ssh URL
137 137
138 138 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist
139 139 pulling from ssh://user@dummy/doesnotexist
140 140 remote: abort: repository doesnotexist not found
141 141 abort: no suitable response from remote hg
142 142 [255]
143 143
144 144 local change
145 145
146 146 $ echo bleah > foo
147 147 $ hg ci -m "add"
148 148
149 149 updating rc
150 150
151 151 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
152 152 $ echo "[ui]" >> .hg/hgrc
153 153 $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc
154 154
155 155 find outgoing
156 156
157 157 $ hg out ssh://user@dummy/remote
158 158 comparing with ssh://user@dummy/remote
159 159 searching for changes
160 160 changeset: 3:a28a9d1a809c
161 161 tag: tip
162 162 parent: 0:1160648e36ce
163 163 user: test
164 164 date: Thu Jan 01 00:00:00 1970 +0000
165 165 summary: add
166 166
167 167
168 168 find incoming on the remote side
169 169
170 170 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
171 171 comparing with ssh://user@dummy/local
172 172 searching for changes
173 173 changeset: 3:a28a9d1a809c
174 174 tag: tip
175 175 parent: 0:1160648e36ce
176 176 user: test
177 177 date: Thu Jan 01 00:00:00 1970 +0000
178 178 summary: add
179 179
180 180
181 181 find incoming on the remote side (using absolute path)
182 182
183 183 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
184 184 comparing with ssh://user@dummy/$TESTTMP/local
185 185 searching for changes
186 186 changeset: 3:a28a9d1a809c
187 187 tag: tip
188 188 parent: 0:1160648e36ce
189 189 user: test
190 190 date: Thu Jan 01 00:00:00 1970 +0000
191 191 summary: add
192 192
193 193
194 194 push
195 195
196 196 $ hg push
197 197 pushing to ssh://user@dummy/remote
198 198 searching for changes
199 199 remote: adding changesets
200 200 remote: adding manifests
201 201 remote: adding file changes
202 202 remote: added 1 changesets with 1 changes to 1 files
203 203 $ cd $TESTTMP/remote
204 204
205 205 check remote tip
206 206
207 207 $ hg tip
208 208 changeset: 3:a28a9d1a809c
209 209 tag: tip
210 210 parent: 0:1160648e36ce
211 211 user: test
212 212 date: Thu Jan 01 00:00:00 1970 +0000
213 213 summary: add
214 214
215 215 $ hg verify
216 216 checking changesets
217 217 checking manifests
218 218 crosschecking files in changesets and manifests
219 219 checking files
220 220 checked 4 changesets with 3 changes to 2 files
221 221 $ hg cat -r tip foo
222 222 bleah
223 223 $ echo z > z
224 224 $ hg ci -A -m z z
225 225 created new head
226 226
227 227 test pushkeys and bookmarks
228 228
229 229 $ cd $TESTTMP/local
230 230 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
231 231 bookmarks
232 232 namespaces
233 233 phases
234 234 $ hg book foo -r 0
235 235 $ hg out -B --config paths.default=bogus://invalid --config paths.default:pushurl=`hg paths default`
236 236 comparing with ssh://user@dummy/remote
237 237 searching for changed bookmarks
238 238 foo 1160648e36ce
239 239 $ hg push -B foo
240 240 pushing to ssh://user@dummy/remote
241 241 searching for changes
242 242 no changes found
243 243 exporting bookmark foo
244 244 [1]
245 245 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
246 246 foo 1160648e36cec0054048a7edc4110c6f84fde594
247 247 $ hg book -f foo
248 248 $ hg push --traceback
249 249 pushing to ssh://user@dummy/remote
250 250 searching for changes
251 251 no changes found
252 252 updating bookmark foo
253 253 [1]
254 254 $ hg book -d foo
255 255 $ hg in -B
256 256 comparing with ssh://user@dummy/remote
257 257 searching for changed bookmarks
258 258 foo a28a9d1a809c
259 259 $ hg book -f -r 0 foo
260 260 $ hg pull -B foo
261 261 pulling from ssh://user@dummy/remote
262 262 no changes found
263 263 updating bookmark foo
264 264 $ hg book -d foo
265 265 $ hg push -B foo
266 266 pushing to ssh://user@dummy/remote
267 267 searching for changes
268 268 no changes found
269 269 deleting remote bookmark foo
270 270 [1]
271 271
272 272 a bad, evil hook that prints to stdout
273 273
274 274 $ cat <<EOF > $TESTTMP/badhook
275 275 > import sys
276 276 > sys.stdout.write("KABOOM\n")
277 277 > sys.stdout.flush()
278 278 > EOF
279 279
280 280 $ cat <<EOF > $TESTTMP/badpyhook.py
281 281 > import sys
282 282 > def hook(ui, repo, hooktype, **kwargs):
283 283 > sys.stdout.write("KABOOM IN PROCESS\n")
284 284 > sys.stdout.flush()
285 285 > EOF
286 286
287 287 $ cat <<EOF >> ../remote/.hg/hgrc
288 288 > [hooks]
289 289 > changegroup.stdout = "$PYTHON" $TESTTMP/badhook
290 290 > changegroup.pystdout = python:$TESTTMP/badpyhook.py:hook
291 291 > EOF
292 292 $ echo r > r
293 293 $ hg ci -A -m z r
294 294
295 295 push should succeed even though it gets an unexpected response
296 296
297 297 $ hg push
298 298 pushing to ssh://user@dummy/remote
299 299 searching for changes
300 300 remote has heads on branch 'default' that are not known locally: 6c0482d977a3
301 301 remote: adding changesets
302 302 remote: adding manifests
303 303 remote: adding file changes
304 304 remote: added 1 changesets with 1 changes to 1 files
305 305 remote: KABOOM
306 306 remote: KABOOM IN PROCESS
307 307 $ hg -R ../remote heads
308 308 changeset: 5:1383141674ec
309 309 tag: tip
310 310 parent: 3:a28a9d1a809c
311 311 user: test
312 312 date: Thu Jan 01 00:00:00 1970 +0000
313 313 summary: z
314 314
315 315 changeset: 4:6c0482d977a3
316 316 parent: 0:1160648e36ce
317 317 user: test
318 318 date: Thu Jan 01 00:00:00 1970 +0000
319 319 summary: z
320 320
321 321
322 322 #if chg
323 323
324 324 try again with remote chg, which should succeed as well
325 325
326 326 $ hg rollback -R ../remote
327 327 repository tip rolled back to revision 4 (undo serve)
328 328
329 329 $ hg push --config ui.remotecmd=chg
330 330 pushing to ssh://user@dummy/remote
331 331 searching for changes
332 332 remote has heads on branch 'default' that are not known locally: 6c0482d977a3
333 333 remote: adding changesets
334 334 remote: adding manifests
335 335 remote: adding file changes
336 336 remote: added 1 changesets with 1 changes to 1 files (py3 !)
337 337 remote: KABOOM
338 338 remote: KABOOM IN PROCESS
339 339 remote: added 1 changesets with 1 changes to 1 files (no-py3 !)
340 340
341 341 #endif
342 342
343 343 clone bookmarks
344 344
345 345 $ hg -R ../remote bookmark test
346 346 $ hg -R ../remote bookmarks
347 347 * test 4:6c0482d977a3
348 348 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
349 349 requesting all changes
350 350 adding changesets
351 351 adding manifests
352 352 adding file changes
353 353 added 6 changesets with 5 changes to 4 files (+1 heads)
354 354 new changesets 1160648e36ce:1383141674ec
355 355 updating to branch default
356 356 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
357 357 $ hg -R local-bookmarks bookmarks
358 358 test 4:6c0482d977a3
359 359
360 360 passwords in ssh urls are not supported
361 361 (we use a glob because different Python versions give different
362 362 results here)
363 363
364 364 $ hg push ssh://user:erroneouspwd@dummy/remote
365 365 pushing to ssh://user:*@dummy/remote (glob)
366 366 abort: password in URL not supported
367 367 [255]
368 368
369 369 $ cd $TESTTMP
370 370
371 371 hide outer repo
372 372 $ hg init
373 373
374 374 Test remote paths with spaces (issue2983):
375 375
376 376 $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
377 377 $ touch "$TESTTMP/a repo/test"
378 378 $ hg -R 'a repo' commit -A -m "test"
379 379 adding test
380 380 $ hg -R 'a repo' tag tag
381 381 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
382 382 73649e48688a
383 383
384 384 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
385 385 abort: unknown revision 'noNoNO'
386 386 [255]
387 387
388 388 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
389 389
390 390 $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
391 391 destination directory: a repo
392 392 abort: destination 'a repo' is not empty
393 393 [10]
394 394
395 395 #if no-rhg
396 396 Make sure hg is really paranoid in serve --stdio mode. It used to be
397 397 possible to get a debugger REPL by specifying a repo named --debugger.
398 398 $ hg -R --debugger serve --stdio
399 399 abort: potentially unsafe serve --stdio invocation: ['-R', '--debugger', 'serve', '--stdio']
400 400 [255]
401 401 $ hg -R --config=ui.debugger=yes serve --stdio
402 402 abort: potentially unsafe serve --stdio invocation: ['-R', '--config=ui.debugger=yes', 'serve', '--stdio']
403 403 [255]
404 404 Abbreviations of 'serve' also don't work, to avoid shenanigans.
405 405 $ hg -R narf serv --stdio
406 406 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
407 407 [255]
408 408 #else
409 409 rhg aborts early on -R without a repository at that path
410 410 $ hg -R --debugger serve --stdio
411 411 abort: potentially unsafe serve --stdio invocation: ['-R', '--debugger', 'serve', '--stdio'] (missing-correct-output !)
412 412 abort: repository --debugger not found (known-bad-output !)
413 413 [255]
414 414 $ hg -R --config=ui.debugger=yes serve --stdio
415 415 abort: potentially unsafe serve --stdio invocation: ['-R', '--config=ui.debugger=yes', 'serve', '--stdio'] (missing-correct-output !)
416 416 abort: repository --config=ui.debugger=yes not found (known-bad-output !)
417 417 [255]
418 418 $ hg -R narf serv --stdio
419 419 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio'] (missing-correct-output !)
420 420 abort: repository narf not found (known-bad-output !)
421 421 [255]
422 422 If the repo does exist, rhg finds an unsupported command and falls back to Python,
423 423 which still does the right thing
424 424 $ hg init narf
425 425 $ hg -R narf serv --stdio
426 426 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
427 427 [255]
428 428 #endif
429 429
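The checks above amount to an allow-list over the exact argument vector. A
simplified sketch of the idea (hedged; the real dispatch code is more
involved than this):

    def is_safe_serve_stdio(args):
        # only the literal shape ['-R', <repo>, 'serve', '--stdio'] is
        # accepted: an option-like "repository name" such as '--debugger'
        # or '--config=...' cannot smuggle in extra behavior, and
        # abbreviations like 'serv' are rejected outright
        return (
            len(args) == 4
            and args[0] == '-R'
            and not args[1].startswith('-')
            and args[2] == 'serve'
            and args[3] == '--stdio'
        )
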
430 430 Test hg-ssh using a helper script that will restore PYTHONPATH (which might
431 431 have been cleared by a hg.exe wrapper) and invoke hg-ssh with the right
432 432 parameters:
433 433
434 434 $ cat > ssh.sh << EOF
435 435 > userhost="\$1"
436 436 > SSH_ORIGINAL_COMMAND="\$2"
437 437 > export SSH_ORIGINAL_COMMAND
438 438 > PYTHONPATH="$PYTHONPATH"
439 439 > export PYTHONPATH
440 440 > "$PYTHON" "$TESTDIR/../contrib/hg-ssh" "$TESTTMP/a repo"
441 441 > EOF
442 442
443 443 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a repo"
444 444 73649e48688a
445 445
446 446 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a'repo"
447 447 remote: Illegal repository "$TESTTMP/a'repo"
448 448 abort: no suitable response from remote hg
449 449 [255]
450 450
451 451 $ hg id --ssh "sh ssh.sh" --remotecmd hacking "ssh://user@dummy/a'repo"
452 452 remote: Illegal command "hacking -R 'a'\''repo' serve --stdio"
453 453 abort: no suitable response from remote hg
454 454 [255]
455 455
456 456 $ SSH_ORIGINAL_COMMAND="'hg' -R 'a'repo' serve --stdio" "$PYTHON" "$TESTDIR/../contrib/hg-ssh"
457 457 Illegal command "'hg' -R 'a'repo' serve --stdio": No closing quotation
458 458 [255]
459 459
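Taken together, the three failures above outline the validation hg-ssh
performs on SSH_ORIGINAL_COMMAND. A rough sketch of that logic (an
approximation of contrib/hg-ssh, with details hedged):

    import os, shlex, sys

    def bail(msg):
        sys.stderr.write(msg + '\n')
        sys.exit(255)

    def check_command(orig_cmd, allowed_root):
        try:
            cmd = shlex.split(orig_cmd)
        except ValueError as e:  # e.g. "No closing quotation"
            bail('Illegal command "%s": %s' % (orig_cmd, e))
        if cmd[:2] != ['hg', '-R'] or cmd[3:] != ['serve', '--stdio']:
            bail('Illegal command "%s"' % orig_cmd)
        repo = os.path.normpath(os.path.abspath(cmd[2]))
        if repo != allowed_root:
            bail('Illegal repository "%s"' % repo)
        return repo
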
460 460 Test hg-ssh in read-only mode:
461 461
462 462 $ cat > ssh.sh << EOF
463 463 > userhost="\$1"
464 464 > SSH_ORIGINAL_COMMAND="\$2"
465 465 > export SSH_ORIGINAL_COMMAND
466 466 > PYTHONPATH="$PYTHONPATH"
467 467 > export PYTHONPATH
468 468 > "$PYTHON" "$TESTDIR/../contrib/hg-ssh" --read-only "$TESTTMP/remote"
469 469 > EOF
470 470
471 471 $ hg clone --ssh "sh ssh.sh" "ssh://user@dummy/$TESTTMP/remote" read-only-local
472 472 requesting all changes
473 473 adding changesets
474 474 adding manifests
475 475 adding file changes
476 476 added 6 changesets with 5 changes to 4 files (+1 heads)
477 477 new changesets 1160648e36ce:1383141674ec
478 478 updating to branch default
479 479 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
480 480
481 481 $ cd read-only-local
482 482 $ echo "baz" > bar
483 483 $ hg ci -A -m "unpushable commit" bar
484 484 $ hg push --ssh "sh ../ssh.sh"
485 485 pushing to ssh://user@dummy/*/remote (glob)
486 486 searching for changes
487 487 remote: Permission denied
488 488 remote: pretxnopen.hg-ssh hook failed
489 489 abort: push failed on remote
490 490 [100]
491 491
492 492 $ cd $TESTTMP
493 493
494 494 stderr from remote commands should be printed before stdout from local code (issue4336)
495 495
496 496 $ hg clone remote stderr-ordering
497 497 updating to branch default
498 498 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
499 499 $ cd stderr-ordering
500 500 $ cat >> localwrite.py << EOF
501 501 > from mercurial import exchange, extensions
502 502 >
503 503 > def wrappedpush(orig, repo, *args, **kwargs):
504 504 > res = orig(repo, *args, **kwargs)
505 505 > repo.ui.write(b'local stdout\n')
506 506 > repo.ui.flush()
507 507 > return res
508 508 >
509 509 > def extsetup(ui):
510 510 > extensions.wrapfunction(exchange, b'push', wrappedpush)
511 511 > EOF
512 512
513 513 $ cat >> .hg/hgrc << EOF
514 514 > [paths]
515 515 > default-push = ssh://user@dummy/remote
516 516 > [ui]
517 517 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
518 518 > [extensions]
519 519 > localwrite = localwrite.py
520 520 > EOF
521 521
522 522 $ echo localwrite > foo
523 523 $ hg commit -m 'testing localwrite'
524 524 $ hg push
525 525 pushing to ssh://user@dummy/remote
526 526 searching for changes
527 527 remote: adding changesets
528 528 remote: adding manifests
529 529 remote: adding file changes
530 530 remote: added 1 changesets with 1 changes to 1 files
531 531 remote: KABOOM
532 532 remote: KABOOM IN PROCESS
533 533 local stdout
534 534
535 535 debug output
536 536
537 537 $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
538 538 pulling from ssh://user@dummy/remote
539 539 running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
540 540 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
541 541 devel-peer-request: hello+between
542 542 devel-peer-request: pairs: 81 bytes
543 543 sending hello command
544 544 sending between command
545 545 remote: 444 (sshv1 no-rust !)
546 546 remote: 463 (sshv1 rust !)
547 547 protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
548 548 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-rust !)
549 549 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (rust !)
550 550 remote: 1 (sshv1 !)
551 551 devel-peer-request: protocaps
552 552 devel-peer-request: caps: * bytes (glob)
553 553 sending protocaps command
554 554 query 1; heads
555 555 devel-peer-request: batched-content
556 556 devel-peer-request: - heads (0 arguments)
557 557 devel-peer-request: - known (1 arguments)
558 558 devel-peer-request: batch
559 559 devel-peer-request: cmds: 141 bytes
560 560 sending batch command
561 561 searching for changes
562 562 all remote heads known locally
563 563 no changes found
564 564 devel-peer-request: getbundle
565 565 devel-peer-request: bookmarks: 1 bytes
566 566 devel-peer-request: bundlecaps: 270 bytes
567 567 devel-peer-request: cg: 1 bytes
568 568 devel-peer-request: common: 122 bytes
569 569 devel-peer-request: heads: 122 bytes
570 570 devel-peer-request: listkeys: 9 bytes
571 571 devel-peer-request: phases: 1 bytes
572 572 sending getbundle command
573 573 bundle2-input-bundle: with-transaction
574 574 bundle2-input-part: "bookmarks" supported
575 575 bundle2-input-part: total payload size 26
576 576 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
577 577 bundle2-input-part: total payload size 45
578 578 bundle2-input-part: "phase-heads" supported
579 579 bundle2-input-part: total payload size 72
580 580 bundle2-input-bundle: 3 parts total
581 581 checking for updated bookmarks
582 582
583 583 $ cd $TESTTMP
584 584
585 585 $ cat dummylog
586 586 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
587 587 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
588 588 Got arguments 1:user@dummy 2:hg -R $TESTTMP/nonexistent serve --stdio
589 589 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
590 590 Got arguments 1:user@dummy 2:hg -R local-stream serve --stdio (no-reposimplestore !)
591 591 Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
592 592 Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
593 593 Got arguments 1:user@dummy 2:hg -R doesnotexist serve --stdio
594 594 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
595 595 Got arguments 1:user@dummy 2:hg -R local serve --stdio
596 596 Got arguments 1:user@dummy 2:hg -R $TESTTMP/local serve --stdio
597 597 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
598 598 changegroup-in-remote hook: HG_BUNDLE2=1
599 599 HG_HOOKNAME=changegroup
600 600 HG_HOOKTYPE=changegroup
601 601 HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60
602 602 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60
603 603 HG_SOURCE=serve
604 604 HG_TXNID=TXN:$ID$
605 605 HG_TXNNAME=serve
606 606 HG_URL=remote:ssh:$LOCALIP
607 607
608 608 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
609 609 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
610 610 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
611 611 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
612 612 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
613 613 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
614 614 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
615 615 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
616 616 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
617 617 changegroup-in-remote hook: HG_BUNDLE2=1
618 618 HG_HOOKNAME=changegroup
619 619 HG_HOOKTYPE=changegroup
620 620 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6
621 621 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6
622 622 HG_SOURCE=serve
623 623 HG_TXNID=TXN:$ID$
624 624 HG_TXNNAME=serve
625 625 HG_URL=remote:ssh:$LOCALIP
626 626
627 627 Got arguments 1:user@dummy 2:chg -R remote serve --stdio (chg !)
628 628 changegroup-in-remote hook: HG_BUNDLE2=1 (chg !)
629 629 HG_HOOKNAME=changegroup (chg !)
630 630 HG_HOOKTYPE=changegroup (chg !)
631 631 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 (chg !)
632 632 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 (chg !)
633 633 HG_SOURCE=serve (chg !)
634 634 HG_TXNID=TXN:$ID$ (chg !)
635 635 HG_TXNNAME=serve (chg !)
636 636 HG_URL=remote:ssh:$LOCALIP (chg !)
637 637 (chg !)
638 638 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
639 639 Got arguments 1:user@dummy 2:hg init 'a repo'
640 640 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
641 641 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
642 642 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
643 643 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
644 644 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
645 645 changegroup-in-remote hook: HG_BUNDLE2=1
646 646 HG_HOOKNAME=changegroup
647 647 HG_HOOKTYPE=changegroup
648 648 HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8
649 649 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8
650 650 HG_SOURCE=serve
651 651 HG_TXNID=TXN:$ID$
652 652 HG_TXNNAME=serve
653 653 HG_URL=remote:ssh:$LOCALIP
654 654
655 655 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
656 656
657 657
658 658 remote hook failure is attributed to remote
659 659
660 660 $ cat > $TESTTMP/failhook << EOF
661 661 > def hook(ui, repo, **kwargs):
662 662 > ui.write(b'hook failure!\n')
663 663 > ui.flush()
664 664 > return 1
665 665 > EOF
666 666
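(a Python hook signals failure by returning a truthy value; installed as
pretxnchangegroup below, that failure rolls the remote transaction back,
and the client reports the abort as coming from the remote side)
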
667 667 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
668 668
669 669 $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout
670 670 $ cd hookout
671 671 $ touch hookfailure
672 672 $ hg -q commit -A -m 'remote hook failure'
673 673 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push
674 674 pushing to ssh://user@dummy/remote
675 675 searching for changes
676 676 remote: adding changesets
677 677 remote: adding manifests
678 678 remote: adding file changes
679 679 remote: hook failure!
680 680 remote: transaction abort!
681 681 remote: rollback completed
682 682 remote: pretxnchangegroup.fail hook failed
683 683 abort: push failed on remote
684 684 [100]
685 685
686 686 abort during pull is properly reported as such
687 687
688 688 $ echo morefoo >> ../remote/foo
689 689 $ hg -R ../remote commit --message "more foo to be pulled"
690 690 $ cat >> ../remote/.hg/hgrc << EOF
691 691 > [extensions]
692 692 > crash = ${TESTDIR}/crashgetbundler.py
693 693 > EOF
694 694 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull
695 695 pulling from ssh://user@dummy/remote
696 696 searching for changes
697 697 remote: abort: this is an exercise
698 698 abort: pull failed on remote
699 699 [100]
700 700
701 701 abort with no error hint when there is an ssh problem while pulling
702 702
703 703 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
704 704 pulling from ssh://brokenrepository/
705 705 abort: no suitable response from remote hg
706 706 [255]
707 707
708 708 abort with a configured error hint when there is an ssh problem while pulling
709 709
710 710 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \
711 711 > --config ui.ssherrorhint="Please see http://company/internalwiki/ssh.html"
712 712 pulling from ssh://brokenrepository/
713 713 abort: no suitable response from remote hg
714 714 (Please see http://company/internalwiki/ssh.html)
715 715 [255]
716 716
717 717 test that a custom environment is passed down to the ssh executable
718 718 $ cat >>dumpenv <<EOF
719 719 > #! /bin/sh
720 720 > echo \$VAR >&2
721 721 > EOF
722 722 $ chmod +x dumpenv
723 723 $ hg pull ssh://something --config ui.ssh="sh dumpenv"
724 724 pulling from ssh://something/
725 725 remote:
726 726 abort: no suitable response from remote hg
727 727 [255]
728 728 $ hg pull ssh://something --config ui.ssh="sh dumpenv" --config sshenv.VAR=17
729 729 pulling from ssh://something/
730 730 remote: 17
731 731 abort: no suitable response from remote hg
732 732 [255]
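
The --config form above has a persistent equivalent (a sketch of the same
configuration in an hgrc; every key under [sshenv] is exported into the
environment of the spawned ssh process):

    [sshenv]
    VAR = 17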
733 733
@@ -1,935 +1,935 b''
1 1 setup
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [extensions]
5 5 > blackbox=
6 6 > mock=$TESTDIR/mockblackbox.py
7 7 > [blackbox]
8 8 > track = command, commandfinish, tagscache
9 9 > EOF
10 10
11 11 Helper functions:
12 12
13 13 $ cacheexists() {
14 14 > [ -f .hg/cache/tags2-visible ] && echo "tag cache exists" || echo "no tag cache"
15 15 > }
16 16
17 17 $ fnodescacheexists() {
18 18 > [ -f .hg/cache/hgtagsfnodes1 ] && echo "fnodes cache exists" || echo "no fnodes cache"
19 19 > }
20 20
21 21 $ dumptags() {
22 22 > rev=$1
23 23 > echo "rev $rev: .hgtags:"
24 24 > hg cat -r$rev .hgtags
25 25 > }
26 26
27 27 # XXX need to test that the tag cache works when we strip an old head
28 28 # and add a new one rooted off non-tip: i.e. node and rev of tip are the
29 29 # same, but stuff has changed behind tip.
30 30
31 31 Setup:
32 32
33 33 $ hg init t
34 34 $ cd t
35 35 $ cacheexists
36 36 no tag cache
37 37 $ fnodescacheexists
38 38 no fnodes cache
39 39 $ hg id
40 40 000000000000 tip
41 41 $ cacheexists
42 42 no tag cache
43 43 $ fnodescacheexists
44 44 no fnodes cache
45 45 $ echo a > a
46 46 $ hg add a
47 47 $ hg commit -m "test"
48 48 $ hg co
49 49 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
50 50 $ hg identify
51 51 acb14030fe0a tip
52 52 $ hg identify -r 'wdir()'
53 53 acb14030fe0a tip
54 54 $ cacheexists
55 55 tag cache exists
56 56 No fnodes cache because the .hgtags file doesn't exist
57 57 (this is an implementation detail)
58 58 $ fnodescacheexists
59 59 no fnodes cache
60 60
61 61 Try corrupting the cache
62 62
63 63 $ printf 'a b' > .hg/cache/tags2-visible
64 64 $ hg identify
65 65 acb14030fe0a tip
66 66 $ cacheexists
67 67 tag cache exists
68 68 $ fnodescacheexists
69 69 no fnodes cache
70 70 $ hg identify
71 71 acb14030fe0a tip
72 72
73 73 Create local tag with long name:
74 74
75 75 $ T=`hg identify --debug --id`
76 76 $ hg tag -l "This is a local tag with a really long name!"
77 77 $ hg tags
78 78 tip 0:acb14030fe0a
79 79 This is a local tag with a really long name! 0:acb14030fe0a
80 80 $ rm .hg/localtags
81 81
82 82 Create a tag behind hg's back:
83 83
84 84 $ echo "$T first" > .hgtags
85 85 $ cat .hgtags
86 86 acb14030fe0a21b60322c440ad2d20cf7685a376 first
87 87 $ hg add .hgtags
88 88 $ hg commit -m "add tags"
89 89 $ hg tags
90 90 tip 1:b9154636be93
91 91 first 0:acb14030fe0a
92 92 $ hg identify
93 93 b9154636be93 tip
94 94
95 95 We should have a fnodes cache now that we have a real tag
96 96 The cache should have an empty entry for rev 0 and a valid entry for rev 1.
97 97
98 98
99 99 $ fnodescacheexists
100 100 fnodes cache exists
101 101 $ f --size --hexdump .hg/cache/hgtagsfnodes1
102 102 .hg/cache/hgtagsfnodes1: size=48
103 103 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
104 104 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
105 105 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
106 106 $ hg debugtagscache
107 107 0 acb14030fe0a21b60322c440ad2d20cf7685a376 missing
108 108 1 b9154636be938d3d431e75a7c906504a079bfe07 26b7b4a773e09ee3c52f510e19e05e1ff966d859
109 109
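The dump and the debug output above reflect the cache's fixed-width layout:
one 24-byte record per revision, a 4-byte fragment of the changeset node
followed by the 20-byte .hgtags filenode, with absent entries filled with
0xff. A minimal decoding sketch (based only on that layout, not hg's own
code):

    def read_fnodes_cache(path):
        recsize = 24
        with open(path, 'rb') as fp:
            data = fp.read()
        for rev in range(len(data) // recsize):
            rec = data[rev * recsize:(rev + 1) * recsize]
            if rec == b'\xff' * recsize:
                print('%d missing' % rev)
            else:
                # rec[:4] is the changeset fragment used for validation
                print('%d %s %s' % (rev, rec[:4].hex(), rec[4:].hex()))
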
110 110 Repeat with cold tag cache:
111 111
112 112 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
113 113 $ hg identify
114 114 b9154636be93 tip
115 115
116 116 $ fnodescacheexists
117 117 fnodes cache exists
118 118 $ f --size --hexdump .hg/cache/hgtagsfnodes1
119 119 .hg/cache/hgtagsfnodes1: size=48
120 120 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
121 121 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
122 122 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
123 123
124 124 And again, but now unable to write tag cache or lock file:
125 125
126 126 #if unix-permissions no-fsmonitor
127 127
128 128 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
129 129 $ chmod 555 .hg/cache
130 130 $ hg identify
131 131 b9154636be93 tip
132 132 $ chmod 755 .hg/cache
133 133
134 134 (this block should be protected by no-fsmonitor, because "chmod 555 .hg"
135 135 makes watchman fail to access files under .hg)
136 136
137 137 $ chmod 555 .hg
138 138 $ hg identify
139 139 b9154636be93 tip
140 140 $ chmod 755 .hg
141 141 #endif
142 142
143 143 Tag cache debug info written to blackbox log
144 144
145 145 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
146 146 $ hg identify
147 147 b9154636be93 tip
148 148 $ hg blackbox -l 6
149 149 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
150 150 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing 48 bytes to cache/hgtagsfnodes1
151 151 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
152 152 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
153 153 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
154 154 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
155 155
156 156 Failure to acquire lock results in no write
157 157
158 158 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
159 159 $ echo 'foo:1' > .hg/store/lock
160 160 $ hg identify
161 161 b9154636be93 tip
162 162 $ hg blackbox -l 6
163 163 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
164 164 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> not writing .hg/cache/hgtagsfnodes1 because lock cannot be acquired
165 165 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
166 166 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
167 167 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
168 168 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
169 169
170 170 $ fnodescacheexists
171 171 no fnodes cache
172 172
173 173 $ rm .hg/store/lock
174 174
175 175 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
176 176 $ hg identify
177 177 b9154636be93 tip
178 178
179 179 Create a branch:
180 180
181 181 $ echo bb > a
182 182 $ hg status
183 183 M a
184 184 $ hg identify
185 185 b9154636be93+ tip
186 186 $ hg co first
187 187 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
188 188 $ hg id
189 189 acb14030fe0a+ first
190 190 $ hg id -r 'wdir()'
191 191 acb14030fe0a+ first
192 192 $ hg -v id
193 193 acb14030fe0a+ first
194 194 $ hg status
195 195 M a
196 196 $ echo 1 > b
197 197 $ hg add b
198 198 $ hg commit -m "branch"
199 199 created new head
200 200
201 201 Creating a new commit shouldn't append to the .hgtags fnodes cache until
202 202 tags info is accessed
203 203
204 204 $ f --size --hexdump .hg/cache/hgtagsfnodes1
205 205 .hg/cache/hgtagsfnodes1: size=48
206 206 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
207 207 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
208 208 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
209 209
210 210 $ hg id
211 211 c8edf04160c7 tip
212 212
213 213 The first 4 bytes of record 3 are the changeset node fragment
214 214
215 215 $ f --size --hexdump .hg/cache/hgtagsfnodes1
216 216 .hg/cache/hgtagsfnodes1: size=72
217 217 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
218 218 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
219 219 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
220 220 0030: c8 ed f0 41 00 00 00 00 00 00 00 00 00 00 00 00 |...A............|
221 221 0040: 00 00 00 00 00 00 00 00 |........|
222 222
223 223 Merge the two heads:
224 224
225 225 $ hg merge 1
226 226 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
227 227 (branch merge, don't forget to commit)
228 228 $ hg blackbox -l3
229 229 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28 (5000)> merge 1
230 230 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> merge 1 exited 0 after * seconds (glob)
231 231 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l3
232 232 $ hg id
233 233 c8edf04160c7+b9154636be93+ tip
234 234 $ hg status
235 235 M .hgtags
236 236 $ hg commit -m "merge"
237 237
238 238 Create a fake head, make sure tag not visible afterwards:
239 239
240 240 $ cp .hgtags tags
241 241 $ hg tag last
242 242 $ hg rm .hgtags
243 243 $ hg commit -m "remove"
244 244
245 245 $ mv tags .hgtags
246 246 $ hg add .hgtags
247 247 $ hg commit -m "readd"
248 248 $
249 249 $ hg tags
250 250 tip 6:35ff301afafe
251 251 first 0:acb14030fe0a
252 252
253 253 Add invalid tags:
254 254
255 255 $ echo "spam" >> .hgtags
256 256 $ echo >> .hgtags
257 257 $ echo "foo bar" >> .hgtags
258 258 $ echo "a5a5 invalid" >> .hg/localtags
259 259 $ cat .hgtags
260 260 acb14030fe0a21b60322c440ad2d20cf7685a376 first
261 261 spam
262 262
263 263 foo bar
264 264 $ hg commit -m "tags"
265 265
266 266 Report tag parse error on other head:
267 267
268 268 $ hg up 3
269 269 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
270 270 $ echo 'x y' >> .hgtags
271 271 $ hg commit -m "head"
272 272 created new head
273 273
274 274 $ hg tags --debug
275 275 .hgtags@75d9f02dfe28, line 2: cannot parse entry
276 276 .hgtags@75d9f02dfe28, line 4: node 'foo' is not well formed
277 277 .hgtags@c4be69a18c11, line 2: node 'x' is not well formed
278 278 tip 8:c4be69a18c11e8bc3a5fdbb576017c25f7d84663
279 279 first 0:acb14030fe0a21b60322c440ad2d20cf7685a376
280 280 $ hg tip
281 281 changeset: 8:c4be69a18c11
282 282 tag: tip
283 283 parent: 3:ac5e980c4dc0
284 284 user: test
285 285 date: Thu Jan 01 00:00:00 1970 +0000
286 286 summary: head
287 287
288 288
289 289 Test tag precedence rules:
290 290
291 291 $ cd ..
292 292 $ hg init t2
293 293 $ cd t2
294 294 $ echo foo > foo
295 295 $ hg add foo
296 296 $ hg ci -m 'add foo' # rev 0
297 297 $ hg tag bar # rev 1
298 298 $ echo >> foo
299 299 $ hg ci -m 'change foo 1' # rev 2
300 300 $ hg up -C 1
301 301 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
302 302 $ hg tag -r 1 -f bar # rev 3
303 303 $ hg up -C 1
304 304 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
305 305 $ echo >> foo
306 306 $ hg ci -m 'change foo 2' # rev 4
307 307 created new head
308 308 $ hg tags
309 309 tip 4:0c192d7d5e6b
310 310 bar 1:78391a272241
311 311
312 312 Repeat in case of cache effects:
313 313
314 314 $ hg tags
315 315 tip 4:0c192d7d5e6b
316 316 bar 1:78391a272241
317 317
318 318 Detailed dump of tag info:
319 319
320 320 $ hg heads -q # expect 4, 3, 2
321 321 4:0c192d7d5e6b
322 322 3:6fa450212aeb
323 323 2:7a94127795a3
324 324 $ dumptags 2
325 325 rev 2: .hgtags:
326 326 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
327 327 $ dumptags 3
328 328 rev 3: .hgtags:
329 329 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
330 330 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
331 331 78391a272241d70354aa14c874552cad6b51bb42 bar
332 332 $ dumptags 4
333 333 rev 4: .hgtags:
334 334 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
335 335
336 336 Dump cache:
337 337
338 338 $ cat .hg/cache/tags2-visible
339 339 4 0c192d7d5e6b78a714de54a2e9627952a877e25a
340 340 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
341 341 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
342 342 78391a272241d70354aa14c874552cad6b51bb42 bar
343 343
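A minimal sketch of reading this cache back (assuming only the layout
visible above: a "tip-rev tip-node" validity header, then "node tag" lines
where a later entry for the same name takes precedence):

    def read_tags_cache(path):
        with open(path) as fp:
            lines = fp.read().splitlines()
        tiprev, tipnode = lines[0].split()  # cache valid for this tip only
        tags = {}
        for line in lines[1:]:
            if line:
                node, name = line.split(' ', 1)
                tags[name] = node           # last entry for a name wins
        return int(tiprev), tipnode, tags
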
344 344 $ f --size --hexdump .hg/cache/hgtagsfnodes1
345 345 .hg/cache/hgtagsfnodes1: size=120
346 346 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
347 347 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
348 348 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
349 349 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
350 350 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
351 351 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
352 352 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
353 353 0070: 78 ee 5a 2d ad bc 94 3d |x.Z-...=|
354 354
355 355 Corrupt the .hgtags fnodes cache
356 356 Extra junk data at the end should get overwritten on the next cache update
357 357
358 358 $ echo extra >> .hg/cache/hgtagsfnodes1
359 359 $ echo dummy1 > foo
360 360 $ hg commit -m throwaway1
361 361
362 362 $ hg tags
363 363 tip 5:8dbfe60eff30
364 364 bar 1:78391a272241
365 365
366 366 $ hg blackbox -l 6
367 367 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags
368 368 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing 24 bytes to cache/hgtagsfnodes1
369 369 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> 3/4 cache hits/lookups in * seconds (glob)
370 370 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing .hg/cache/tags2-visible with 1 tags
371 371 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob)
372 372 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6
373 373
374 374 On junk data + missing cache entries, hg also overwrites the junk.
375 375
376 376 $ rm -f .hg/cache/tags2-visible
377 377 >>> import os
378 378 >>> with open(".hg/cache/hgtagsfnodes1", "ab+") as fp:
379 379 ... fp.seek(-10, os.SEEK_END) and None
380 380 ... fp.truncate() and None
381 381
382 382 $ hg debugtagscache | tail -2
383 383 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
384 384 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 missing
385 385 $ hg tags
386 386 tip 5:8dbfe60eff30
387 387 bar 1:78391a272241
388 388 $ hg debugtagscache | tail -2
389 389 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
390 390 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8af31de17fab7422878ee5a2dadbc943d
391 391
392 392 If the 4 bytes of node hash for a record don't match an existing node, the entry
393 393 is flagged as invalid.
394 394
395 395 >>> import os
396 396 >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
397 397 ... fp.seek(-24, os.SEEK_END) and None
398 398 ... fp.write(b'\xde\xad') and None
399 399
400 400 $ f --size --hexdump .hg/cache/hgtagsfnodes1
401 401 .hg/cache/hgtagsfnodes1: size=144
402 402 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
403 403 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
404 404 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
405 405 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
406 406 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
407 407 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
408 408 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
409 409 0070: 78 ee 5a 2d ad bc 94 3d de ad e6 0e 0c 04 f2 a8 |x.Z-...=........|
410 410 0080: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
411 411
412 412 $ hg debugtagscache | tail -2
413 413 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
414 414 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 invalid
415 415
416 416 $ hg tags
417 417 tip 5:8dbfe60eff30
418 418 bar 1:78391a272241
419 419
420 420 BUG: If the filenode part of an entry in hgtagsfnodes is corrupt and
421 421 tags2-visible is missing, `hg tags` aborts. Corrupting the leading 4 bytes of
422 422 node hash (as above) doesn't seem to trigger the issue. Also note that the
423 423 debug command hides the corruption, both with and without tags2-visible.
424 424
425 425 $ mv .hg/cache/hgtagsfnodes1 .hg/cache/hgtagsfnodes1.bak
426 426 $ hg debugupdatecaches
427 427
428 428 >>> import os
429 429 >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
430 430 ... fp.seek(-16, os.SEEK_END) and None
431 431 ... fp.write(b'\xde\xad') and None
432 432
433 433 $ f --size --hexdump .hg/cache/hgtagsfnodes1
434 434 .hg/cache/hgtagsfnodes1: size=144
435 435 0000: bb d1 79 df 00 00 00 00 00 00 00 00 00 00 00 00 |..y.............|
436 436 0010: 00 00 00 00 00 00 00 00 78 39 1a 27 0c 04 f2 a8 |........x9.'....|
437 437 0020: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
438 438 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
439 439 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
440 440 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
441 441 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
442 442 0070: 78 ee 5a 2d ad bc 94 3d 8d bf e6 0e 0c 04 f2 a8 |x.Z-...=........|
443 443 0080: de ad de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |......B(x.Z-...=|
444 444
445 445 $ hg debugtagscache | tail -2
446 446 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
447 447 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
448 448
449 449 $ rm -f .hg/cache/tags2-visible
450 450 $ hg debugtagscache | tail -2
451 451 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
452 452 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
453 453
454 454 $ hg tags
455 455 tip 5:8dbfe60eff30
456 456 bar 1:78391a272241
457 457
458 458 BUG: Unless this file is restored, the `hg tags` in the next unix-permissions
459 459 conditional will fail: "abort: data/.hgtags.i@0c04f2a8dead: no match found"
460 460
461 461 $ mv .hg/cache/hgtagsfnodes1.bak .hg/cache/hgtagsfnodes1
462 462
463 463 #if unix-permissions no-root
464 464 Errors writing to .hgtags fnodes cache are silently ignored
465 465
466 466 $ echo dummy2 > foo
467 467 $ hg commit -m throwaway2
468 468
469 469 $ chmod a-w .hg/cache/hgtagsfnodes1
470 470 $ rm -f .hg/cache/tags2-visible
471 471
472 472 $ hg tags
473 473 tip 6:b968051b5cf3
474 474 bar 1:78391a272241
475 475
476 476 $ hg blackbox -l 6
477 477 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
478 478 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno *] * (glob)
479 479 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
480 480 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
481 481 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
482 482 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
483 483
484 484 $ chmod a+w .hg/cache/hgtagsfnodes1
485 485
486 486 $ rm -f .hg/cache/tags2-visible
487 487 $ hg tags
488 488 tip 6:b968051b5cf3
489 489 bar 1:78391a272241
490 490
491 491 $ hg blackbox -l 6
492 492 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
493 493 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1
494 494 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
495 495 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
496 496 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
497 497 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
498 498
499 499 $ f --size .hg/cache/hgtagsfnodes1
500 500 .hg/cache/hgtagsfnodes1: size=168
501 501
502 502 $ hg -q --config extensions.strip= strip -r 6 --no-backup
503 503 #endif
504 504
505 505 Stripping doesn't truncate the tags cache until new data is available
506 506
507 507 $ rm -f .hg/cache/hgtagsfnodes1 .hg/cache/tags2-visible
508 508 $ hg tags
509 509 tip 5:8dbfe60eff30
510 510 bar 1:78391a272241
511 511
512 512 $ f --size .hg/cache/hgtagsfnodes1
513 513 .hg/cache/hgtagsfnodes1: size=144
514 514
515 515 $ hg -q --config extensions.strip= strip -r 5 --no-backup
516 516 $ hg tags
517 517 tip 4:0c192d7d5e6b
518 518 bar 1:78391a272241
519 519
520 520 $ hg blackbox -l 5
521 521 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing 24 bytes to cache/hgtagsfnodes1
522 522 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> 2/4 cache hits/lookups in * seconds (glob)
523 523 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing .hg/cache/tags2-visible with 1 tags
524 524 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> tags exited 0 after * seconds (glob)
525 525 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> blackbox -l 5
526 526
527 527 $ f --size .hg/cache/hgtagsfnodes1
528 528 .hg/cache/hgtagsfnodes1: size=120
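
(the sizes match one 24-byte record per revision: 6 records, 144 bytes,
while stripped rev 5 was still covered; 5 records, 120 bytes, once the
cache is rewritten for revs 0-4 only)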
529 529
530 530 $ echo dummy > foo
531 531 $ hg commit -m throwaway3
532 532
533 533 $ hg tags
534 534 tip 5:035f65efb448
535 535 bar 1:78391a272241
536 536
537 537 $ hg blackbox -l 6
538 538 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags
539 539 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing 24 bytes to cache/hgtagsfnodes1
540 540 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> 3/4 cache hits/lookups in * seconds (glob)
541 541 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing .hg/cache/tags2-visible with 1 tags
542 542 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags exited 0 after * seconds (glob)
543 543 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> blackbox -l 6
544 544 $ f --size .hg/cache/hgtagsfnodes1
545 545 .hg/cache/hgtagsfnodes1: size=144
546 546
547 547 $ hg -q --config extensions.strip= strip -r 5 --no-backup
548 548
549 549 Test tag removal:
550 550
551 551 $ hg tag --remove bar # rev 5
552 552 $ hg tip -vp
553 553 changeset: 5:5f6e8655b1c7
554 554 tag: tip
555 555 user: test
556 556 date: Thu Jan 01 00:00:00 1970 +0000
557 557 files: .hgtags
558 558 description:
559 559 Removed tag bar
560 560
561 561
562 562 diff -r 0c192d7d5e6b -r 5f6e8655b1c7 .hgtags
563 563 --- a/.hgtags Thu Jan 01 00:00:00 1970 +0000
564 564 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
565 565 @@ -1,1 +1,3 @@
566 566 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
567 567 +78391a272241d70354aa14c874552cad6b51bb42 bar
568 568 +0000000000000000000000000000000000000000 bar
569 569
570 570 $ hg tags
571 571 tip 5:5f6e8655b1c7
572 572 $ hg tags # again, try to expose cache bugs
573 573 tip 5:5f6e8655b1c7
574 574
575 575 Remove nonexistent tag:
576 576
577 577 $ hg tag --remove foobar
578 578 abort: tag 'foobar' does not exist
579 579 [10]
580 580 $ hg tip
581 581 changeset: 5:5f6e8655b1c7
582 582 tag: tip
583 583 user: test
584 584 date: Thu Jan 01 00:00:00 1970 +0000
585 585 summary: Removed tag bar
586 586
587 587
588 588 Undo a tag with rollback:
589 589
590 590 $ hg rollback # destroy rev 5 (restore bar)
591 591 repository tip rolled back to revision 4 (undo commit)
592 592 working directory now based on revision 4
593 593 $ hg tags
594 594 tip 4:0c192d7d5e6b
595 595 bar 1:78391a272241
596 596 $ hg tags
597 597 tip 4:0c192d7d5e6b
598 598 bar 1:78391a272241
599 599
600 600 Test tag rank:
601 601
602 602 $ cd ..
603 603 $ hg init t3
604 604 $ cd t3
605 605 $ echo foo > foo
606 606 $ hg add foo
607 607 $ hg ci -m 'add foo' # rev 0
608 608 $ hg tag -f bar # rev 1 bar -> 0
609 609 $ hg tag -f bar # rev 2 bar -> 1
610 610 $ hg tag -fr 0 bar # rev 3 bar -> 0
611 611 $ hg tag -fr 1 bar # rev 4 bar -> 1
612 612 $ hg tag -fr 0 bar # rev 5 bar -> 0
613 613 $ hg tags
614 614 tip 5:85f05169d91d
615 615 bar 0:bbd179dfa0a7
616 616 $ hg co 3
617 617 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
618 618 $ echo barbar > foo
619 619 $ hg ci -m 'change foo' # rev 6
620 620 created new head
621 621 $ hg tags
622 622 tip 6:735c3ca72986
623 623 bar 0:bbd179dfa0a7
624 624
625 625 Don't allow moving tag without -f:
626 626
627 627 $ hg tag -r 3 bar
628 628 abort: tag 'bar' already exists (use -f to force)
629 629 [10]
630 630 $ hg tags
631 631 tip 6:735c3ca72986
632 632 bar 0:bbd179dfa0a7
633 633
634 634 Strip 1: expose an old head:
635 635
636 636 $ hg --config extensions.mq= strip 5
637 637 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
638 638 $ hg tags # partly stale cache
639 639 tip 5:735c3ca72986
640 640 bar 1:78391a272241
641 641 $ hg tags # up-to-date cache
642 642 tip 5:735c3ca72986
643 643 bar 1:78391a272241
644 644
645 645 Strip 2: destroy whole branch, no old head exposed
646 646
647 647 $ hg --config extensions.mq= strip 4
648 648 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
649 649 $ hg tags # partly stale
650 650 tip 4:735c3ca72986
651 651 bar 0:bbd179dfa0a7
652 652 $ rm -f .hg/cache/tags2-visible
653 653 $ hg tags # cold cache
654 654 tip 4:735c3ca72986
655 655 bar 0:bbd179dfa0a7
656 656
657 657 Test tag rank with 3 heads:
658 658
659 659 $ cd ..
660 660 $ hg init t4
661 661 $ cd t4
662 662 $ echo foo > foo
663 663 $ hg add
664 664 adding foo
665 665 $ hg ci -m 'add foo' # rev 0
666 666 $ hg tag bar # rev 1 bar -> 0
667 667 $ hg tag -f bar # rev 2 bar -> 1
668 668 $ hg up -qC 0
669 669 $ hg tag -fr 2 bar # rev 3 bar -> 2
670 670 $ hg tags
671 671 tip 3:197c21bbbf2c
672 672 bar 2:6fa450212aeb
673 673 $ hg up -qC 0
674 674 $ hg tag -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
675 675
676 676 Bar should still point to rev 2:
677 677
678 678 $ hg tags
679 679 tip 4:3b4b14ed0202
680 680 bar 2:6fa450212aeb
681 681
682 682 Test that removing global/local tags does not get confused when trying
683 683 to remove a tag of type X which actually only exists as type Y:
684 684
685 685 $ cd ..
686 686 $ hg init t5
687 687 $ cd t5
688 688 $ echo foo > foo
689 689 $ hg add
690 690 adding foo
691 691 $ hg ci -m 'add foo' # rev 0
692 692
693 693 $ hg tag -r 0 -l localtag
694 694 $ hg tag --remove localtag
695 695 abort: tag 'localtag' is not a global tag
696 696 [10]
697 697 $
698 698 $ hg tag -r 0 globaltag
699 699 $ hg tag --remove -l globaltag
700 700 abort: tag 'globaltag' is not a local tag
701 701 [10]
702 702 $ hg tags -v
703 703 tip 1:a0b6fe111088
704 704 localtag 0:bbd179dfa0a7 local
705 705 globaltag 0:bbd179dfa0a7
706 706
707 707 Templated output:
708 708
709 709 (immediate values)
710 710
711 711 $ hg tags -T '{pad(tag, 9)} {rev}:{node} ({type})\n'
712 712 tip 1:a0b6fe111088c8c29567d3876cc466aa02927cae ()
713 713 localtag 0:bbd179dfa0a71671c253b3ae0aa1513b60d199fa (local)
714 714 globaltag 0:bbd179dfa0a71671c253b3ae0aa1513b60d199fa ()
715 715
716 716 (ctx/revcache dependent)
717 717
718 718 $ hg tags -T '{pad(tag, 9)} {rev} {file_adds}\n'
719 719 tip 1 .hgtags
720 720 localtag 0 foo
721 721 globaltag 0 foo
722 722
723 723 $ hg tags -T '{pad(tag, 9)} {rev}:{node|shortest}\n'
724 724 tip 1:a0b6
725 725 localtag 0:bbd1
726 726 globaltag 0:bbd1
727 727
728 728 Test for issue3911
729 729
730 730 $ hg tag -r 0 -l localtag2
731 731 $ hg tag -l --remove localtag2
732 732 $ hg tags -v
733 733 tip 1:a0b6fe111088
734 734 localtag 0:bbd179dfa0a7 local
735 735 globaltag 0:bbd179dfa0a7
736 736
737 737 $ hg tag -r 1 -f localtag
738 738 $ hg tags -v
739 739 tip 2:5c70a037bb37
740 740 localtag 1:a0b6fe111088
741 741 globaltag 0:bbd179dfa0a7
742 742
743 743 $ hg tags -v
744 744 tip 2:5c70a037bb37
745 745 localtag 1:a0b6fe111088
746 746 globaltag 0:bbd179dfa0a7
747 747
748 748 $ hg tag -r 1 localtag2
749 749 $ hg tags -v
750 750 tip 3:bbfb8cd42be2
751 751 localtag2 1:a0b6fe111088
752 752 localtag 1:a0b6fe111088
753 753 globaltag 0:bbd179dfa0a7
754 754
755 755 $ hg tags -v
756 756 tip 3:bbfb8cd42be2
757 757 localtag2 1:a0b6fe111088
758 758 localtag 1:a0b6fe111088
759 759 globaltag 0:bbd179dfa0a7
760 760
761 761 $ cd ..
762 762
763 763 Create a repository with tags data to test .hgtags fnodes transfer
764 764
765 765 $ hg init tagsserver
766 766 $ cd tagsserver
767 767 $ touch foo
768 768 $ hg -q commit -A -m initial
769 769 $ hg tag -m 'tag 0.1' 0.1
770 770 $ echo second > foo
771 771 $ hg commit -m second
772 772 $ hg tag -m 'tag 0.2' 0.2
773 773 $ hg tags
774 774 tip 3:40f0358cb314
775 775 0.2 2:f63cc8fe54e4
776 776 0.1 0:96ee1d7354c4
777 777 $ cd ..
778 778
779 779 Cloning should pull down hgtags fnodes mappings and write the cache file
780 780
781 781 $ hg clone --pull tagsserver tagsclient
782 782 requesting all changes
783 783 adding changesets
784 784 adding manifests
785 785 adding file changes
786 786 added 4 changesets with 4 changes to 2 files
787 787 new changesets 96ee1d7354c4:40f0358cb314
788 788 updating to branch default
789 789 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
790 790
791 791 The absence of tags2* files means the cache wasn't written through the normal mechanism.
792 792
793 793 $ ls tagsclient/.hg/cache
794 794 branch2-base
795 795 branch2-immutable
796 796 branch2-served
797 797 branch2-served.hidden
798 798 branch2-visible
799 799 branch2-visible-hidden
800 800 hgtagsfnodes1
801 801 rbc-names-v1
802 802 rbc-revs-v1
803 803 tags2
804 804 tags2-served
805 805
806 806 Cache should contain the head only, even though other nodes have tags data
807 807
808 808 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
809 809 tagsclient/.hg/cache/hgtagsfnodes1: size=96
810 0000: 96 ee 1d 73 00 00 00 00 00 00 00 00 00 00 00 00 |...s............|
811 0010: 00 00 00 00 00 00 00 00 c4 da b0 c2 94 65 e1 c6 |.............e..|
812 0020: 0d f7 f0 dd 32 04 ea 57 78 c8 97 97 79 fc d5 95 |....2..Wx...y...|
813 0030: f6 3c c8 fe 94 65 e1 c6 0d f7 f0 dd 32 04 ea 57 |.<...e......2..W|
814 0040: 78 c8 97 97 79 fc d5 95 40 f0 35 8c 19 e0 a7 d3 |x...y...@.5.....|
810 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
811 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
812 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
813 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
814 0040: ff ff ff ff ff ff ff ff 40 f0 35 8c 19 e0 a7 d3 |........@.5.....|
815 815 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
816 816
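For readers decoding the dump: hgtagsfnodes1 is an array indexed by revision
number, one 24-byte record per revision -- the first 4 bytes of the changeset
node followed by the 20-byte filenode of that changeset's .hgtags -- and a
record of 24 0xff bytes marks an entry that was never computed. A minimal
reader sketch under that layout (hypothetical helper, not a Mercurial API):

    def read_fnodes_cache(path):
        """Yield (rev, changeset-prefix, fnode) for each populated record
        of a .hg/cache/hgtagsfnodes1 file; records still holding the
        24-byte 0xff "missing" marker are skipped."""
        RECSIZE = 4 + 20              # changeset-node prefix + .hgtags filenode
        MISSING = b"\xff" * RECSIZE   # record not computed yet
        with open(path, "rb") as fp:
            data = fp.read()
        for rev in range(len(data) // RECSIZE):
            rec = data[rev * RECSIZE : (rev + 1) * RECSIZE]
            if rec != MISSING:
                yield rev, rec[:4].hex(), rec[4:].hex()

Applied to the 96-byte client cache above, only rev 3 (prefix 40f0358c) comes
back, which is exactly the "head only" behaviour this test asserts.
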
817 817 Running hg tags should produce a tags2* file and not change the cache
818 818
819 819 $ hg -R tagsclient tags
820 820 tip 3:40f0358cb314
821 821 0.2 2:f63cc8fe54e4
822 822 0.1 0:96ee1d7354c4
823 823
824 824 $ ls tagsclient/.hg/cache
825 825 branch2-base
826 826 branch2-immutable
827 827 branch2-served
828 828 branch2-served.hidden
829 829 branch2-visible
830 830 branch2-visible-hidden
831 831 hgtagsfnodes1
832 832 rbc-names-v1
833 833 rbc-revs-v1
834 834 tags2
835 835 tags2-served
836 836 tags2-visible
837 837
838 838 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
839 839 tagsclient/.hg/cache/hgtagsfnodes1: size=96
840 0000: 96 ee 1d 73 00 00 00 00 00 00 00 00 00 00 00 00 |...s............|
841 0010: 00 00 00 00 00 00 00 00 c4 da b0 c2 94 65 e1 c6 |.............e..|
842 0020: 0d f7 f0 dd 32 04 ea 57 78 c8 97 97 79 fc d5 95 |....2..Wx...y...|
843 0030: f6 3c c8 fe 94 65 e1 c6 0d f7 f0 dd 32 04 ea 57 |.<...e......2..W|
844 0040: 78 c8 97 97 79 fc d5 95 40 f0 35 8c 19 e0 a7 d3 |x...y...@.5.....|
840 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
841 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
842 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
843 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
844 0040: ff ff ff ff ff ff ff ff 40 f0 35 8c 19 e0 a7 d3 |........@.5.....|
845 845 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
846 846
847 847 Check that the bundle includes cache data
848 848
849 849 $ hg -R tagsclient bundle --all ./test-cache-in-bundle-all-rev.hg
850 850 4 changesets found
851 851 $ hg debugbundle ./test-cache-in-bundle-all-rev.hg
852 852 Stream params: {Compression: BZ}
853 853 changegroup -- {nbchanges: 4, version: 02} (mandatory: True)
854 854 96ee1d7354c4ad7372047672c36a1f561e3a6a4c
855 855 c4dab0c2fd337eb9191f80c3024830a4889a8f34
856 856 f63cc8fe54e4d326f8d692805d70e092f851ddb1
857 857 40f0358cb314c824a5929ee527308d90e023bc10
858 858 hgtagsfnodes -- {} (mandatory: True)
859 859 cache:rev-branch-cache -- {} (mandatory: False)
860 860
861 861 Check that local clone includes cache data
862 862
863 863 $ hg clone tagsclient tags-local-clone
864 864 updating to branch default
865 865 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
866 866 $ (cd tags-local-clone/.hg/cache/; ls -1 tag*)
867 867 tags2
868 868 tags2-served
869 869 tags2-visible
870 870
871 871 Avoid writing logs when trying to delete an already deleted tag
872 872 $ hg init issue5752
873 873 $ cd issue5752
874 874 $ echo > a
875 875 $ hg commit -Am 'add a'
876 876 adding a
877 877 $ hg tag a
878 878 $ hg tags
879 879 tip 1:bd7ee4f3939b
880 880 a 0:a8a82d372bb3
881 881 $ hg log
882 882 changeset: 1:bd7ee4f3939b
883 883 tag: tip
884 884 user: test
885 885 date: Thu Jan 01 00:00:00 1970 +0000
886 886 summary: Added tag a for changeset a8a82d372bb3
887 887
888 888 changeset: 0:a8a82d372bb3
889 889 tag: a
890 890 user: test
891 891 date: Thu Jan 01 00:00:00 1970 +0000
892 892 summary: add a
893 893
894 894 $ hg tag --remove a
895 895 $ hg log
896 896 changeset: 2:e7feacc7ec9e
897 897 tag: tip
898 898 user: test
899 899 date: Thu Jan 01 00:00:00 1970 +0000
900 900 summary: Removed tag a
901 901
902 902 changeset: 1:bd7ee4f3939b
903 903 user: test
904 904 date: Thu Jan 01 00:00:00 1970 +0000
905 905 summary: Added tag a for changeset a8a82d372bb3
906 906
907 907 changeset: 0:a8a82d372bb3
908 908 user: test
909 909 date: Thu Jan 01 00:00:00 1970 +0000
910 910 summary: add a
911 911
912 912 $ hg tag --remove a
913 913 abort: tag 'a' is already removed
914 914 [10]
915 915 $ hg log
916 916 changeset: 2:e7feacc7ec9e
917 917 tag: tip
918 918 user: test
919 919 date: Thu Jan 01 00:00:00 1970 +0000
920 920 summary: Removed tag a
921 921
922 922 changeset: 1:bd7ee4f3939b
923 923 user: test
924 924 date: Thu Jan 01 00:00:00 1970 +0000
925 925 summary: Added tag a for changeset a8a82d372bb3
926 926
927 927 changeset: 0:a8a82d372bb3
928 928 user: test
929 929 date: Thu Jan 01 00:00:00 1970 +0000
930 930 summary: add a
931 931
932 932 $ cat .hgtags
933 933 a8a82d372bb35b42ff736e74f07c23bcd99c371f a
934 934 a8a82d372bb35b42ff736e74f07c23bcd99c371f a
935 935 0000000000000000000000000000000000000000 a
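
The cat output shows the on-disk format this whole file exercises: one
`<40-hex-node> <tag>` line per tagging event, appended over time, where a
later line overrides an earlier one and the null node records a removal. A
minimal reader along those lines (a sketch; the real parser in
mercurial/tags.py additionally keeps the per-tag node history used for rank
resolution):

    NULLHEX = b"0" * 40

    def read_hgtags(data):
        """Parse .hgtags bytes: the last entry for a tag wins, and a tag
        whose latest entry is the null node counts as removed."""
        tags = {}
        for line in data.splitlines():
            line = line.strip()
            if not line:
                continue
            node, _, name = line.partition(b" ")
            tags[name] = node
        return {n: v for n, v in tags.items() if v != NULLHEX}

On the three lines above this yields an empty dict: tag 'a' was added and then
removed, matching the "already removed" abort.
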
@@ -1,903 +1,903 b''
1 1 $ cat << EOF >> $HGRCPATH
2 2 > [ui]
3 3 > ssh="$PYTHON" "$TESTDIR/dummyssh"
4 4 > EOF
5 5
6 6 Set up repo
7 7
8 8 $ hg --config experimental.treemanifest=True init repo
9 9 $ cd repo
10 10
11 11 Requirements get set on init
12 12
13 13 $ grep treemanifest .hg/requires
14 14 treemanifest
15 15
16 16 Without directories, looks like any other repo
17 17
18 18 $ echo 0 > a
19 19 $ echo 0 > b
20 20 $ hg ci -Aqm initial
21 21 $ hg debugdata -m 0
22 22 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
23 23 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
24 24
25 25 Submanifest is stored in separate revlog
26 26
27 27 $ mkdir dir1
28 28 $ echo 1 > dir1/a
29 29 $ echo 1 > dir1/b
30 30 $ echo 1 > e
31 31 $ hg ci -Aqm 'add dir1'
32 32 $ hg debugdata -m 1
33 33 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
34 34 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
35 35 dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc)
36 36 e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
37 37 $ hg debugdata --dir dir1 0
38 38 a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
39 39 b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
40 40
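Note the trailing `t` on the dir1 entry in the root manifest: manifest lines
are `path\0<40-hex-node>` plus an optional one-character flag, and `t` is the
flag marking a subdirectory whose content lives in its own revlog (`x` and `l`
are the executable and symlink flags for files). A decoding sketch
(hypothetical helper):

    def parse_manifest_line(line):
        """Split one manifest entry into (path, hexnode, flag); flag is
        b'' for a plain file, b'x' executable, b'l' symlink, and b't'
        for a subdirectory stored in its own revlog."""
        path, rest = line.split(b"\x00", 1)
        return path, rest[:40], rest[40:]

    # parse_manifest_line(b"dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et")
    # -> (b"dir1", b"8b3ffd73f901e83304c83d33132c8e774ceac44e", b"t")
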
41 41 Can add nested directories
42 42
43 43 $ mkdir dir1/dir1
44 44 $ echo 2 > dir1/dir1/a
45 45 $ echo 2 > dir1/dir1/b
46 46 $ mkdir dir1/dir2
47 47 $ echo 2 > dir1/dir2/a
48 48 $ echo 2 > dir1/dir2/b
49 49 $ hg ci -Aqm 'add dir1/dir1'
50 50 $ hg files -r .
51 51 a
52 52 b
53 53 dir1/a
54 54 dir1/b
55 55 dir1/dir1/a
56 56 dir1/dir1/b
57 57 dir1/dir2/a
58 58 dir1/dir2/b
59 59 e
60 60
61 61 The manifest command works
62 62
63 63 $ hg manifest
64 64 a
65 65 b
66 66 dir1/a
67 67 dir1/b
68 68 dir1/dir1/a
69 69 dir1/dir1/b
70 70 dir1/dir2/a
71 71 dir1/dir2/b
72 72 e
73 73
74 74 Revision is not created for unchanged directory
75 75
76 76 $ mkdir dir2
77 77 $ echo 3 > dir2/a
78 78 $ hg add dir2
79 79 adding dir2/a
80 80 $ hg debugindex --dir dir1 > before
81 81 $ hg ci -qm 'add dir2'
82 82 $ hg debugindex --dir dir1 > after
83 83 $ diff before after
84 84 $ rm before after
85 85
86 86 Removing a directory does not create a revlog entry
87 87
88 88 $ hg rm dir1/dir1
89 89 removing dir1/dir1/a
90 90 removing dir1/dir1/b
91 91 $ hg debugindex --dir dir1/dir1 > before
92 92 $ hg ci -qm 'remove dir1/dir1'
93 93 $ hg debugindex --dir dir1/dir1 > after
94 94 $ diff before after
95 95 $ rm before after
96 96
97 97 Check that hg files (calls treemanifest.walk()) works
98 98 without loading all directory revlogs
99 99
100 100 $ hg co 'desc("add dir2")'
101 101 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
102 102 $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup
103 103 $ hg files -r . dir1
104 104 dir1/a
105 105 dir1/b
106 106 dir1/dir1/a
107 107 dir1/dir1/b
108 108 dir1/dir2/a
109 109 dir1/dir2/b
110 110
111 111 Check that status between revisions works (calls treemanifest.matches())
112 112 without loading all directory revlogs
113 113
114 114 $ hg status --rev 'desc("add dir1")' --rev . dir1
115 115 A dir1/dir1/a
116 116 A dir1/dir1/b
117 117 A dir1/dir2/a
118 118 A dir1/dir2/b
119 119 $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2
120 120
121 121 Merge creates 2-parent revision of directory revlog
122 122
123 123 $ echo 5 > dir1/a
124 124 $ hg ci -Aqm 'modify dir1/a'
125 125 $ hg co '.^'
126 126 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
127 127 $ echo 6 > dir1/b
128 128 $ hg ci -Aqm 'modify dir1/b'
129 129 $ hg merge 'desc("modify dir1/a")'
130 130 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
131 131 (branch merge, don't forget to commit)
132 132 $ hg ci -m 'conflict-free merge involving dir1/'
133 133 $ cat dir1/a
134 134 5
135 135 $ cat dir1/b
136 136 6
137 137 $ hg debugindex --dir dir1
138 138 rev linkrev nodeid p1 p2
139 139 0 1 8b3ffd73f901 000000000000 000000000000
140 140 1 2 68e9d057c5a8 8b3ffd73f901 000000000000
141 141 2 4 4698198d2624 68e9d057c5a8 000000000000
142 142 3 5 44844058ccce 68e9d057c5a8 000000000000
143 143 4 6 bf3d9b744927 68e9d057c5a8 000000000000
144 144 5 7 dde7c0af2a03 bf3d9b744927 44844058ccce
145 145
146 146 Merge keeping the directory from parent 1 does not create a revlog entry. (Note that
147 147 dir1's manifest does change, but only because dir1/a's filelog changes.)
148 148
149 149 $ hg co 'desc("add dir2")'
150 150 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 151 $ echo 8 > dir2/a
152 152 $ hg ci -m 'modify dir2/a'
153 153 created new head
154 154
155 155 $ hg debugindex --dir dir2 > before
156 156 $ hg merge 'desc("modify dir1/a")'
157 157 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
158 158 (branch merge, don't forget to commit)
159 159 $ hg revert -r 'desc("modify dir2/a")' .
160 160 reverting dir1/a
161 161 $ hg ci -m 'merge, keeping parent 1'
162 162 $ hg debugindex --dir dir2 > after
163 163 $ diff before after
164 164 $ rm before after
165 165
166 166 Merge keeping the directory from parent 2 does not create a revlog entry. (Note that
167 167 dir2's manifest does change, but only because dir2/a's filelog changes.)
168 168
169 169 $ hg co 'desc("modify dir2/a")'
170 170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
171 171 $ hg debugindex --dir dir1 > before
172 172 $ hg merge 'desc("modify dir1/a")'
173 173 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
174 174 (branch merge, don't forget to commit)
175 175 $ hg revert -r 'desc("modify dir1/a")' .
176 176 reverting dir2/a
177 177 $ hg ci -m 'merge, keeping parent 2'
178 178 created new head
179 179 $ hg debugindex --dir dir1 > after
180 180 $ diff before after
181 181 $ rm before after
182 182
183 183 Create flat source repo for tests with mixed flat/tree manifests
184 184
185 185 $ cd ..
186 186 $ hg init repo-flat
187 187 $ cd repo-flat
188 188
189 189 Create a few commits with flat manifest
190 190
191 191 $ echo 0 > a
192 192 $ echo 0 > b
193 193 $ echo 0 > e
194 194 $ for d in dir1 dir1/dir1 dir1/dir2 dir2
195 195 > do
196 196 > mkdir $d
197 197 > echo 0 > $d/a
198 198 > echo 0 > $d/b
199 199 > done
200 200 $ hg ci -Aqm initial
201 201
202 202 $ echo 1 > a
203 203 $ echo 1 > dir1/a
204 204 $ echo 1 > dir1/dir1/a
205 205 $ hg ci -Aqm 'modify on branch 1'
206 206
207 207 $ hg co 0
208 208 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
209 209 $ echo 2 > b
210 210 $ echo 2 > dir1/b
211 211 $ echo 2 > dir1/dir1/b
212 212 $ hg ci -Aqm 'modify on branch 2'
213 213
214 214 $ hg merge 1
215 215 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
216 216 (branch merge, don't forget to commit)
217 217 $ hg ci -m 'merge of flat manifests to new flat manifest'
218 218
219 219 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
220 220 $ cat hg.pid >> $DAEMON_PIDS
221 221
222 222 Create clone with tree manifests enabled
223 223
224 224 $ cd ..
225 225 $ hg clone --config experimental.treemanifest=1 \
226 226 > http://localhost:$HGPORT repo-mixed -r 1
227 227 adding changesets
228 228 adding manifests
229 229 adding file changes
230 230 added 2 changesets with 14 changes to 11 files
231 231 new changesets 5b02a3e8db7e:581ef6037d8b
232 232 updating to branch default
233 233 11 files updated, 0 files merged, 0 files removed, 0 files unresolved
234 234 $ cd repo-mixed
235 235 $ test -d .hg/store/meta
236 236 [1]
237 237 $ grep treemanifest .hg/requires
238 238 treemanifest
239 239
240 240 Should be possible to push updates from flat to tree manifest repo
241 241
242 242 $ hg -R ../repo-flat push ssh://user@dummy/repo-mixed
243 243 pushing to ssh://user@dummy/repo-mixed
244 244 searching for changes
245 245 remote: adding changesets
246 246 remote: adding manifests
247 247 remote: adding file changes
248 248 remote: added 2 changesets with 3 changes to 3 files
249 249
250 250 Commit should store revlog per directory
251 251
252 252 $ hg co 1
253 253 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
254 254 $ echo 3 > a
255 255 $ echo 3 > dir1/a
256 256 $ echo 3 > dir1/dir1/a
257 257 $ hg ci -m 'first tree'
258 258 created new head
259 259 $ find .hg/store/meta | sort
260 260 .hg/store/meta
261 261 .hg/store/meta/dir1
262 262 .hg/store/meta/dir1/00manifest.i
263 263 .hg/store/meta/dir1/dir1
264 264 .hg/store/meta/dir1/dir1/00manifest.i
265 265 .hg/store/meta/dir1/dir2
266 266 .hg/store/meta/dir1/dir2/00manifest.i
267 267 .hg/store/meta/dir2
268 268 .hg/store/meta/dir2/00manifest.i
269 269
270 270 Merge of two trees
271 271
272 272 $ hg co 2
273 273 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
274 274 $ hg merge 1
275 275 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
276 276 (branch merge, don't forget to commit)
277 277 $ hg ci -m 'merge of flat manifests to new tree manifest'
278 278 created new head
279 279 $ hg diff -r 3
280 280
281 281 Parent of the tree root manifest should be the flat manifest, and the merge should have two
282 282
283 283 $ hg debugindex -m
284 284 rev linkrev nodeid p1 p2
285 285 0 0 40536115ed9e 000000000000 000000000000
286 286 1 1 f3376063c255 40536115ed9e 000000000000
287 287 2 2 5d9b9da231a2 40536115ed9e 000000000000
288 288 3 3 d17d663cbd8a 5d9b9da231a2 f3376063c255
289 289 4 4 51e32a8c60ee f3376063c255 000000000000
290 290 5 5 cc5baa78b230 5d9b9da231a2 f3376063c255
291 291
292 292
293 293 Status across flat/tree boundary should work
294 294
295 295 $ hg status --rev '.^' --rev .
296 296 M a
297 297 M dir1/a
298 298 M dir1/dir1/a
299 299
300 300
301 301 Turning off treemanifest config has no effect
302 302
303 303 $ hg debugindex --dir dir1
304 304 rev linkrev nodeid p1 p2
305 305 0 4 064927a0648a 000000000000 000000000000
306 306 1 5 25ecb8cb8618 000000000000 000000000000
307 307 $ echo 2 > dir1/a
308 308 $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a'
309 309 $ hg debugindex --dir dir1
310 310 rev linkrev nodeid p1 p2
311 311 0 4 064927a0648a 000000000000 000000000000
312 312 1 5 25ecb8cb8618 000000000000 000000000000
313 313 2 6 5b16163a30c6 25ecb8cb8618 000000000000
314 314
315 315 Stripping and recovering changes should work
316 316
317 317 $ hg st --change tip
318 318 M dir1/a
319 319 $ hg --config extensions.strip= strip tip
320 320 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
321 321 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg
322 322 $ hg debugindex --dir dir1
323 323 rev linkrev nodeid p1 p2
324 324 0 4 064927a0648a 000000000000 000000000000
325 325 1 5 25ecb8cb8618 000000000000 000000000000
326 326
327 327 #if repobundlerepo
328 328 $ hg incoming .hg/strip-backup/*
329 329 comparing with .hg/strip-backup/*-backup.hg (glob)
330 330 searching for changes
331 331 changeset: 6:51cfd7b1e13b
332 332 tag: tip
333 333 user: test
334 334 date: Thu Jan 01 00:00:00 1970 +0000
335 335 summary: modify dir1/a
336 336
337 337 #endif
338 338
339 339 $ hg unbundle .hg/strip-backup/*
340 340 adding changesets
341 341 adding manifests
342 342 adding file changes
343 343 added 1 changesets with 1 changes to 1 files
344 344 new changesets 51cfd7b1e13b (1 drafts)
345 345 (run 'hg update' to get a working copy)
346 346 $ hg --config extensions.strip= strip tip
347 347 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/*-backup.hg (glob)
348 348 $ hg unbundle -q .hg/strip-backup/*
349 349 $ hg debugindex --dir dir1
350 350 rev linkrev nodeid p1 p2
351 351 0 4 064927a0648a 000000000000 000000000000
352 352 1 5 25ecb8cb8618 000000000000 000000000000
353 353 2 6 5b16163a30c6 25ecb8cb8618 000000000000
354 354 $ hg st --change tip
355 355 M dir1/a
356 356
357 357 Shelving and unshelving should work
358 358
359 359 $ echo foo >> dir1/a
360 360 $ hg shelve
361 361 shelved as default
362 362 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
363 363 $ hg unshelve
364 364 unshelving change 'default'
365 365 $ hg diff --nodates
366 366 diff -r 708a273da119 dir1/a
367 367 --- a/dir1/a
368 368 +++ b/dir1/a
369 369 @@ -1,1 +1,2 @@
370 370 1
371 371 +foo
372 372
373 373 Pushing from treemanifest repo to an empty repo makes that a treemanifest repo
374 374
375 375 $ cd ..
376 376 $ hg init empty-repo
377 377 $ cat << EOF >> empty-repo/.hg/hgrc
378 378 > [experimental]
379 379 > changegroup3=yes
380 380 > EOF
381 381 $ grep treemanifest empty-repo/.hg/requires
382 382 [1]
383 383 $ hg push -R repo -r 0 empty-repo
384 384 pushing to empty-repo
385 385 searching for changes
386 386 adding changesets
387 387 adding manifests
388 388 adding file changes
389 389 added 1 changesets with 2 changes to 2 files
390 390 $ grep treemanifest empty-repo/.hg/requires
391 391 treemanifest
392 392
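`.hg/requires` is just a newline-separated list of feature tokens, which is
why grep is a sufficient test; done programmatically, a sketch (hypothetical
helper, not a Mercurial API):

    def has_requirement(repo_root, name):
        """Check .hg/requires (one requirement token per line) for name."""
        try:
            with open(repo_root + "/.hg/requires", "rb") as fp:
                return name.encode() in {line.strip() for line in fp}
        except FileNotFoundError:
            return False

    # has_requirement("empty-repo", "treemanifest") -> True after the push
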
393 393 Pushing to an empty repo works
394 394
395 395 $ hg --config experimental.treemanifest=1 init clone
396 396 $ grep treemanifest clone/.hg/requires
397 397 treemanifest
398 398 $ hg push -R repo clone
399 399 pushing to clone
400 400 searching for changes
401 401 adding changesets
402 402 adding manifests
403 403 adding file changes
404 404 added 11 changesets with 15 changes to 10 files (+3 heads)
405 405 $ grep treemanifest clone/.hg/requires
406 406 treemanifest
407 407 $ hg -R clone verify
408 408 checking changesets
409 409 checking manifests
410 410 checking directory manifests
411 411 crosschecking files in changesets and manifests
412 412 checking files
413 413 checked 11 changesets with 15 changes to 10 files
414 414
415 415 Create deeper repo with tree manifests.
416 416
417 417 $ hg --config experimental.treemanifest=True init deeprepo
418 418 $ cd deeprepo
419 419
420 420 $ mkdir .A
421 421 $ mkdir b
422 422 $ mkdir b/bar
423 423 $ mkdir b/bar/orange
424 424 $ mkdir b/bar/orange/fly
425 425 $ mkdir b/foo
426 426 $ mkdir b/foo/apple
427 427 $ mkdir b/foo/apple/bees
428 428
429 429 $ touch .A/one.txt
430 430 $ touch .A/two.txt
431 431 $ touch b/bar/fruits.txt
432 432 $ touch b/bar/orange/fly/gnat.py
433 433 $ touch b/bar/orange/fly/housefly.txt
434 434 $ touch b/foo/apple/bees/flower.py
435 435 $ touch c.txt
436 436 $ touch d.py
437 437
438 438 $ hg ci -Aqm 'initial'
439 439
440 440 $ echo >> .A/one.txt
441 441 $ echo >> .A/two.txt
442 442 $ echo >> b/bar/fruits.txt
443 443 $ echo >> b/bar/orange/fly/gnat.py
444 444 $ echo >> b/bar/orange/fly/housefly.txt
445 445 $ echo >> b/foo/apple/bees/flower.py
446 446 $ echo >> c.txt
447 447 $ echo >> d.py
448 448 $ hg ci -Aqm 'second'
449 449
450 450 We'll see that visitdir works by removing some treemanifest revlogs and running
451 451 the files command with various parameters.
452 452
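The optimisation being tested: given only a directory name, a matcher can
often prove that nothing underneath it can match, so treemanifest never opens
that dirlog. A simplified stand-in for that visitdir check (not the real
mercurial.match API; includes/excludes here are plain path prefixes):

    def visitdir(directory, includes, excludes):
        """Return False when no file under `directory` can match a query
        restricted to `includes` minus `excludes`, i.e. when the dirlog
        for `directory` never needs to be loaded."""
        d = directory.rstrip("/")
        if any(d == e or d.startswith(e + "/") for e in excludes):
            return False  # the whole directory is excluded

        def overlaps(p):
            # p is d itself, inside d, or an ancestor of d
            return p == d or p.startswith(d + "/") or d.startswith(p + "/")

        if includes and not any(overlaps(i) for i in includes):
            return False  # no include can reach into this directory
        return True

    # visitdir("b/foo", includes=["b/bar"], excludes=[]) -> False (skip dirlog)
    # visitdir("b", includes=["b/bar"], excludes=[])     -> True
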
453 453 Test files from the root.
454 454
455 455 $ hg files -r .
456 456 .A/one.txt
457 457 .A/two.txt
458 458 b/bar/fruits.txt
459 459 b/bar/orange/fly/gnat.py
460 460 b/bar/orange/fly/housefly.txt
461 461 b/foo/apple/bees/flower.py
462 462 c.txt
463 463 d.py
464 464
465 465 Excludes with a glob should not exclude everything from the glob's root
466 466
467 467 $ hg files -r . -X 'b/fo?' b
468 468 b/bar/fruits.txt
469 469 b/bar/orange/fly/gnat.py
470 470 b/bar/orange/fly/housefly.txt
471 471 $ cp -R .hg/store .hg/store-copy
472 472
473 473 Test files for a subdirectory.
474 474
475 475 #if reporevlogstore
476 476 $ rm -r .hg/store/meta/~2e_a
477 477 #endif
478 478 #if reposimplestore
479 479 $ rm -r .hg/store/meta/._a
480 480 #endif
481 481 $ hg files -r . b
482 482 b/bar/fruits.txt
483 483 b/bar/orange/fly/gnat.py
484 484 b/bar/orange/fly/housefly.txt
485 485 b/foo/apple/bees/flower.py
486 486 $ hg diff -r '.^' -r . --stat b
487 487 b/bar/fruits.txt | 1 +
488 488 b/bar/orange/fly/gnat.py | 1 +
489 489 b/bar/orange/fly/housefly.txt | 1 +
490 490 b/foo/apple/bees/flower.py | 1 +
491 491 4 files changed, 4 insertions(+), 0 deletions(-)
492 492 $ cp -R .hg/store-copy/. .hg/store
493 493
494 494 Test files with just includes and excludes.
495 495
496 496 #if reporevlogstore
497 497 $ rm -r .hg/store/meta/~2e_a
498 498 #endif
499 499 #if reposimplestore
500 500 $ rm -r .hg/store/meta/._a
501 501 #endif
502 502 $ rm -r .hg/store/meta/b/bar/orange/fly
503 503 $ rm -r .hg/store/meta/b/foo/apple/bees
504 504 $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
505 505 b/bar/fruits.txt
506 506 $ hg diff -r '.^' -r . --stat -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
507 507 b/bar/fruits.txt | 1 +
508 508 1 files changed, 1 insertions(+), 0 deletions(-)
509 509 $ cp -R .hg/store-copy/. .hg/store
510 510
511 511 Test files for a subdirectory, excluding a directory within it.
512 512
513 513 #if reporevlogstore
514 514 $ rm -r .hg/store/meta/~2e_a
515 515 #endif
516 516 #if reposimplestore
517 517 $ rm -r .hg/store/meta/._a
518 518 #endif
519 519 $ rm -r .hg/store/meta/b/foo
520 520 $ hg files -r . -X path:b/foo b
521 521 b/bar/fruits.txt
522 522 b/bar/orange/fly/gnat.py
523 523 b/bar/orange/fly/housefly.txt
524 524 $ hg diff -r '.^' -r . --stat -X path:b/foo b
525 525 b/bar/fruits.txt | 1 +
526 526 b/bar/orange/fly/gnat.py | 1 +
527 527 b/bar/orange/fly/housefly.txt | 1 +
528 528 3 files changed, 3 insertions(+), 0 deletions(-)
529 529 $ cp -R .hg/store-copy/. .hg/store
530 530
531 531 Test files for a subdirectory, including only a directory within it, and
532 532 including an unrelated directory.
533 533
534 534 #if reporevlogstore
535 535 $ rm -r .hg/store/meta/~2e_a
536 536 #endif
537 537 #if reposimplestore
538 538 $ rm -r .hg/store/meta/._a
539 539 #endif
540 540 $ rm -r .hg/store/meta/b/foo
541 541 $ hg files -r . -I path:b/bar/orange -I path:a b
542 542 b/bar/orange/fly/gnat.py
543 543 b/bar/orange/fly/housefly.txt
544 544 $ hg diff -r '.^' -r . --stat -I path:b/bar/orange -I path:a b
545 545 b/bar/orange/fly/gnat.py | 1 +
546 546 b/bar/orange/fly/housefly.txt | 1 +
547 547 2 files changed, 2 insertions(+), 0 deletions(-)
548 548 $ cp -R .hg/store-copy/. .hg/store
549 549
550 550 Test files for a pattern, including a directory, and excluding a directory
551 551 within that.
552 552
553 553 #if reporevlogstore
554 554 $ rm -r .hg/store/meta/~2e_a
555 555 #endif
556 556 #if reposimplestore
557 557 $ rm -r .hg/store/meta/._a
558 558 #endif
559 559 $ rm -r .hg/store/meta/b/foo
560 560 $ rm -r .hg/store/meta/b/bar/orange
561 561 $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
562 562 b/bar/fruits.txt
563 563 $ hg diff -r '.^' -r . --stat glob:**.txt -I path:b/bar -X path:b/bar/orange
564 564 b/bar/fruits.txt | 1 +
565 565 1 files changed, 1 insertions(+), 0 deletions(-)
566 566 $ cp -R .hg/store-copy/. .hg/store
567 567
568 568 Add some more changes to the deep repo
569 569 $ echo narf >> b/bar/fruits.txt
570 570 $ hg ci -m narf
571 571 $ echo troz >> b/bar/orange/fly/gnat.py
572 572 $ hg ci -m troz
573 573
574 574 Verify works
575 575 $ hg verify
576 576 checking changesets
577 577 checking manifests
578 578 checking directory manifests
579 579 crosschecking files in changesets and manifests
580 580 checking files
581 581 checked 4 changesets with 18 changes to 8 files
582 582
583 583 #if repofncache
584 584 Dirlogs are included in fncache
585 585 $ grep meta/.A/00manifest.i .hg/store/fncache
586 586 meta/.A/00manifest.i
587 587
588 588 Rebuilt fncache includes dirlogs
589 589 $ rm .hg/store/fncache
590 590 $ hg debugrebuildfncache
591 591 adding data/.A/one.txt.i
592 592 adding data/.A/two.txt.i
593 593 adding data/b/bar/fruits.txt.i
594 594 adding data/b/bar/orange/fly/gnat.py.i
595 595 adding data/b/bar/orange/fly/housefly.txt.i
596 596 adding data/b/foo/apple/bees/flower.py.i
597 597 adding data/c.txt.i
598 598 adding data/d.py.i
599 599 adding meta/.A/00manifest.i
600 600 adding meta/b/00manifest.i
601 601 adding meta/b/bar/00manifest.i
602 602 adding meta/b/bar/orange/00manifest.i
603 603 adding meta/b/bar/orange/fly/00manifest.i
604 604 adding meta/b/foo/00manifest.i
605 605 adding meta/b/foo/apple/00manifest.i
606 606 adding meta/b/foo/apple/bees/00manifest.i
607 607 16 items added, 0 removed from fncache
608 608 #endif
609 609
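As the grep in the fncache block shows, fncache stores unencoded store paths
one per line, so the dirlogs can be enumerated with a plain scan. A sketch
(hypothetical helper):

    def list_dirlogs(fncache_path=".hg/store/fncache"):
        """Return the tree-manifest directories recorded in fncache, i.e.
        every 'meta/<dir>/00manifest.i' entry with the wrapping stripped."""
        dirs = []
        with open(fncache_path, "rb") as fp:
            for line in fp:
                entry = line.rstrip(b"\n")
                if entry.startswith(b"meta/") and entry.endswith(b"/00manifest.i"):
                    dirs.append(entry[len(b"meta/") : -len(b"/00manifest.i")])
        return dirs
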
610 610 Finish first server
611 611 $ killdaemons.py
612 612
613 613 Back up the recently added revlogs
614 614 $ cp -R .hg/store .hg/store-newcopy
615 615
616 616 Verify reports missing dirlog
617 617 $ rm .hg/store/meta/b/00manifest.*
618 618 $ hg verify
619 619 checking changesets
620 620 checking manifests
621 621 checking directory manifests
622 622 0: empty or missing b/
623 623 b/@0: parent-directory manifest refers to unknown revision 67688a370455
624 624 b/@1: parent-directory manifest refers to unknown revision f065da70369e
625 625 b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b
626 626 b/@3: parent-directory manifest refers to unknown revision 367152e6af28
627 627 warning: orphan data file 'meta/b/bar/00manifest.i' (reporevlogstore !)
628 628 warning: orphan data file 'meta/b/bar/orange/00manifest.i' (reporevlogstore !)
629 629 warning: orphan data file 'meta/b/bar/orange/fly/00manifest.i' (reporevlogstore !)
630 630 warning: orphan data file 'meta/b/foo/00manifest.i' (reporevlogstore !)
631 631 warning: orphan data file 'meta/b/foo/apple/00manifest.i' (reporevlogstore !)
632 632 warning: orphan data file 'meta/b/foo/apple/bees/00manifest.i' (reporevlogstore !)
633 633 crosschecking files in changesets and manifests
634 634 b/bar/fruits.txt@0: in changeset but not in manifest
635 635 b/bar/orange/fly/gnat.py@0: in changeset but not in manifest
636 636 b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest
637 637 b/foo/apple/bees/flower.py@0: in changeset but not in manifest
638 638 checking files
639 639 checked 4 changesets with 18 changes to 8 files
640 640 6 warnings encountered! (reporevlogstore !)
641 641 9 integrity errors encountered!
642 642 (first damaged changeset appears to be 0)
643 643 [1]
644 644 $ cp -R .hg/store-newcopy/. .hg/store
645 645
646 646 Verify reports missing dirlog entry
647 647 $ mv -f .hg/store-copy/meta/b/00manifest.* .hg/store/meta/b/
648 648 $ hg verify
649 649 checking changesets
650 650 checking manifests
651 651 checking directory manifests
652 652 b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b
653 653 b/@3: parent-directory manifest refers to unknown revision 367152e6af28
654 654 b/bar/@?: rev 2 points to unexpected changeset 2
655 655 b/bar/@?: 44d7e1146e0d not in parent-directory manifest
656 656 b/bar/@?: rev 3 points to unexpected changeset 3
657 657 b/bar/@?: 70b10c6b17b7 not in parent-directory manifest
658 658 b/bar/orange/@?: rev 2 points to unexpected changeset 3
659 659 (expected None)
660 660 b/bar/orange/fly/@?: rev 2 points to unexpected changeset 3
661 661 (expected None)
662 662 crosschecking files in changesets and manifests
663 663 checking files
664 664 checked 4 changesets with 18 changes to 8 files
665 665 2 warnings encountered!
666 666 8 integrity errors encountered!
667 667 (first damaged changeset appears to be 2)
668 668 [1]
669 669 $ cp -R .hg/store-newcopy/. .hg/store
670 670
671 671 Test cloning a treemanifest repo over http.
672 672 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
673 673 $ cat hg.pid >> $DAEMON_PIDS
674 674 $ cd ..
675 675 We can clone even with the treemanifest knob turned off and we'll still get a treemanifest repo.
676 676 $ hg clone --config experimental.treemanifest=False \
677 677 > --config experimental.changegroup3=True \
678 678 > http://localhost:$HGPORT deepclone
679 679 requesting all changes
680 680 adding changesets
681 681 adding manifests
682 682 adding file changes
683 683 added 4 changesets with 18 changes to 8 files
684 684 new changesets 775704be6f52:523e5c631710
685 685 updating to branch default
686 686 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
687 687 No server errors.
688 688 $ cat deeprepo/errors.log
689 689 The requires file got updated to include treemanifest
690 690 $ cat deepclone/.hg/requires | grep treemanifest
691 691 treemanifest
692 692 Tree manifest revlogs exist.
693 693 $ find deepclone/.hg/store/meta | sort
694 694 deepclone/.hg/store/meta
695 695 deepclone/.hg/store/meta/._a (reposimplestore !)
696 696 deepclone/.hg/store/meta/._a/00manifest.i (reposimplestore !)
697 697 deepclone/.hg/store/meta/b
698 698 deepclone/.hg/store/meta/b/00manifest.i
699 699 deepclone/.hg/store/meta/b/bar
700 700 deepclone/.hg/store/meta/b/bar/00manifest.i
701 701 deepclone/.hg/store/meta/b/bar/orange
702 702 deepclone/.hg/store/meta/b/bar/orange/00manifest.i
703 703 deepclone/.hg/store/meta/b/bar/orange/fly
704 704 deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i
705 705 deepclone/.hg/store/meta/b/foo
706 706 deepclone/.hg/store/meta/b/foo/00manifest.i
707 707 deepclone/.hg/store/meta/b/foo/apple
708 708 deepclone/.hg/store/meta/b/foo/apple/00manifest.i
709 709 deepclone/.hg/store/meta/b/foo/apple/bees
710 710 deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i
711 711 deepclone/.hg/store/meta/~2e_a (reporevlogstore !)
712 712 deepclone/.hg/store/meta/~2e_a/00manifest.i (reporevlogstore !)
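
The `~2e_a` directory is the store-encoded form of `.A`: the filename
encoding rewrites uppercase letters as `_` plus the lowercase letter (doubling
a literal `_`), and dotencode then hex-escapes a leading `.` as `~2e` -- the
simplestore variant lacks the dotencode step, hence `._a`. A simplified sketch
of the two steps (the real encoding in mercurial/store.py handles many more
reserved characters and platform quirks):

    def encode_case(path):
        # uppercase letters become '_' + lowercase; a literal '_' doubles
        out = []
        for c in path:
            if c == "_":
                out.append("__")
            elif "A" <= c <= "Z":
                out.append("_" + c.lower())
            else:
                out.append(c)
        return "".join(out)

    def dotencode(path):
        # hex-escape a path component's leading '.' or ' '
        return "/".join(
            ("~%02x" % ord(c[0])) + c[1:] if c[:1] in (".", " ") else c
            for c in path.split("/")
        )

    # encode_case(".A") -> "._a"; dotencode(encode_case(".A")) -> "~2e_a"
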
713 713 Verify passes.
714 714 $ cd deepclone
715 715 $ hg verify
716 716 checking changesets
717 717 checking manifests
718 718 checking directory manifests
719 719 crosschecking files in changesets and manifests
720 720 checking files
721 721 checked 4 changesets with 18 changes to 8 files
722 722 $ cd ..
723 723
724 724 #if reporevlogstore
725 725 Create clones using old repo formats to use in later tests
726 726 $ hg clone --config format.usestore=False \
727 727 > --config experimental.changegroup3=True \
728 728 > http://localhost:$HGPORT deeprepo-basicstore
729 729 requesting all changes
730 730 adding changesets
731 731 adding manifests
732 732 adding file changes
733 733 added 4 changesets with 18 changes to 8 files
734 734 new changesets 775704be6f52:523e5c631710
735 735 updating to branch default
736 736 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
737 737 $ cd deeprepo-basicstore
738 738 $ grep store .hg/requires
739 739 [1]
740 740 $ hg serve -p $HGPORT1 -d --pid-file=hg.pid --errorlog=errors.log
741 741 $ cat hg.pid >> $DAEMON_PIDS
742 742 $ cd ..
743 743 $ hg clone --config format.usefncache=False \
744 744 > --config experimental.changegroup3=True \
745 745 > http://localhost:$HGPORT deeprepo-encodedstore
746 746 requesting all changes
747 747 adding changesets
748 748 adding manifests
749 749 adding file changes
750 750 added 4 changesets with 18 changes to 8 files
751 751 new changesets 775704be6f52:523e5c631710
752 752 updating to branch default
753 753 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
754 754 $ cd deeprepo-encodedstore
755 755 $ grep fncache .hg/requires
756 756 [1]
757 757 $ hg serve -p $HGPORT2 -d --pid-file=hg.pid --errorlog=errors.log
758 758 $ cat hg.pid >> $DAEMON_PIDS
759 759 $ cd ..
760 760
761 761 Local clone with basicstore
762 762 $ hg clone -U deeprepo-basicstore local-clone-basicstore
763 763 $ hg -R local-clone-basicstore verify
764 764 checking changesets
765 765 checking manifests
766 766 checking directory manifests
767 767 crosschecking files in changesets and manifests
768 768 checking files
769 769 checked 4 changesets with 18 changes to 8 files
770 770
771 771 Local clone with encodedstore
772 772 $ hg clone -U deeprepo-encodedstore local-clone-encodedstore
773 773 $ hg -R local-clone-encodedstore verify
774 774 checking changesets
775 775 checking manifests
776 776 checking directory manifests
777 777 crosschecking files in changesets and manifests
778 778 checking files
779 779 checked 4 changesets with 18 changes to 8 files
780 780
781 781 Local clone with fncachestore
782 782 $ hg clone -U deeprepo local-clone-fncachestore
783 783 $ hg -R local-clone-fncachestore verify
784 784 checking changesets
785 785 checking manifests
786 786 checking directory manifests
787 787 crosschecking files in changesets and manifests
788 788 checking files
789 789 checked 4 changesets with 18 changes to 8 files
790 790
791 791 Stream clone with basicstore
792 792 $ hg clone --config experimental.changegroup3=True --stream -U \
793 793 > http://localhost:$HGPORT1 stream-clone-basicstore
794 794 streaming all changes
795 29 files to transfer, * of data (glob)
795 28 files to transfer, * of data (glob)
796 796 transferred * in * seconds (*) (glob)
797 797 $ hg -R stream-clone-basicstore verify
798 798 checking changesets
799 799 checking manifests
800 800 checking directory manifests
801 801 crosschecking files in changesets and manifests
802 802 checking files
803 803 checked 4 changesets with 18 changes to 8 files
804 804
805 805 Stream clone with encodedstore
806 806 $ hg clone --config experimental.changegroup3=True --stream -U \
807 807 > http://localhost:$HGPORT2 stream-clone-encodedstore
808 808 streaming all changes
809 29 files to transfer, * of data (glob)
809 28 files to transfer, * of data (glob)
810 810 transferred * in * seconds (*) (glob)
811 811 $ hg -R stream-clone-encodedstore verify
812 812 checking changesets
813 813 checking manifests
814 814 checking directory manifests
815 815 crosschecking files in changesets and manifests
816 816 checking files
817 817 checked 4 changesets with 18 changes to 8 files
818 818
819 819 Stream clone with fncachestore
820 820 $ hg clone --config experimental.changegroup3=True --stream -U \
821 821 > http://localhost:$HGPORT stream-clone-fncachestore
822 822 streaming all changes
823 823 22 files to transfer, * of data (glob)
824 824 transferred * in * seconds (*) (glob)
825 825 $ hg -R stream-clone-fncachestore verify
826 826 checking changesets
827 827 checking manifests
828 828 checking directory manifests
829 829 crosschecking files in changesets and manifests
830 830 checking files
831 831 checked 4 changesets with 18 changes to 8 files
832 832
833 833 Packed bundle
834 834 $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
835 835 writing 5330 bytes for 18 files (no-zstd !)
836 836 writing 5400 bytes for 18 files (zstd !)
837 837 bundle requirements: generaldelta, revlogv1, sparserevlog, treemanifest (no-rust !)
838 838 bundle requirements: generaldelta, persistent-nodemap, revlogv1, sparserevlog, treemanifest (rust !)
839 839 $ hg debugbundle --spec repo-packed.hg
840 840 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog%2Ctreemanifest (no-rust !)
841 841 none-packed1;requirements%3Dgeneraldelta%2Cpersistent-nodemap%2Crevlogv1%2Csparserevlog%2Ctreemanifest (rust !)
842 842
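The spec string is `<compression>-<format>` plus percent-encoded parameters,
so the requirements list can be recovered with a standard URL unquote. A
sketch, assuming a single parameter as printed above:

    from urllib.parse import unquote

    def parse_bundlespec(spec):
        """Split 'none-packed1;requirements%3Da%2Cb' into its pieces."""
        head, _, rawparams = spec.partition(";")
        compression, _, fmt = head.partition("-")
        key, _, value = unquote(rawparams).partition("=")
        params = {key: value.split(",")} if rawparams else {}
        return compression, fmt, params

    # parse_bundlespec("none-packed1;requirements%3Dgeneraldelta%2Crevlogv1")
    # -> ("none", "packed1", {"requirements": ["generaldelta", "revlogv1"]})
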
843 843 #endif
844 844
845 845 Bundle with changegroup2 is not supported
846 846
847 847 $ hg -R deeprepo bundle --all -t v2 deeprepo.bundle
848 848 abort: repository does not support bundle version 02
849 849 [255]
850 850
851 851 Pull does not include a changegroup for a manifest the client already has
852 852 from the other branch
853 853
854 854 $ mkdir grafted-dir-repo
855 855 $ cd grafted-dir-repo
856 856 $ hg --config experimental.treemanifest=1 init
857 857 $ mkdir dir
858 858 $ echo a > dir/file
859 859 $ echo a > file
860 860 $ hg ci -Am initial
861 861 adding dir/file
862 862 adding file
863 863 $ echo b > dir/file
864 864 $ hg ci -m updated
865 865 $ hg co '.^'
866 866 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
867 867 $ hg revert -r tip dir/
868 868 reverting dir/file
869 869 $ echo b > file # to make sure root manifest is sent
870 870 $ hg ci -m grafted
871 871 created new head
872 872 $ cd ..
873 873
874 874 $ hg --config experimental.treemanifest=1 clone --pull -r 1 \
875 875 > grafted-dir-repo grafted-dir-repo-clone
876 876 adding changesets
877 877 adding manifests
878 878 adding file changes
879 879 added 2 changesets with 3 changes to 2 files
880 880 new changesets d84f4c419457:09ab742f3b0f
881 881 updating to branch default
882 882 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
883 883 $ cd grafted-dir-repo-clone
884 884 $ hg pull -r 2
885 885 pulling from $TESTTMP/grafted-dir-repo
886 886 searching for changes
887 887 adding changesets
888 888 adding manifests
889 889 adding file changes
890 890 added 1 changesets with 1 changes to 1 files (+1 heads)
891 891 new changesets 73699489fb7c
892 892 (run 'hg heads' to see heads, 'hg merge' to merge)
893 893
894 894 Committing an empty commit does not duplicate the root treemanifest
895 895 $ echo z >> z
896 896 $ hg commit -Aqm 'pre-empty commit'
897 897 $ hg rm z
898 898 $ hg commit --amend -m 'empty commit'
899 899 saved backup bundle to $TESTTMP/grafted-dir-repo-clone/.hg/strip-backup/cb99d5717cea-9e3b6b02-amend.hg
900 900 $ hg log -r 'tip + tip^' -T '{manifest}\n'
901 901 1:678d3574b88c
902 902 1:678d3574b88c
903 903 $ hg --config extensions.strip= strip -r . -q