sparse: lock the store when updating requirements config...
Arseniy Alekseyev
r49326:b74ee41a stable
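Note: the hg.py hunks below extend clone() to take the destination working-copy lock (destwlock) alongside the store lock (destlock) during a local stream clone, and to hand both off to the resulting peer via weak references. A minimal sketch of the lock-ordering convention those hunks follow, assuming a localrepository-like object; this helper is illustrative and not part of the changeset:

    def _locked_config_update(repo):
        # Mercurial convention: acquire the working-copy lock (wlock)
        # before the store lock (lock); both act as context managers and
        # are released in reverse order when the blocks exit.
        with repo.wlock():
            with repo.lock():
                pass  # e.g. rewrite requirements or sparse config here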
@@ -1,1591 +1,1608 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14 import stat
15 import weakref
15 16
16 17 from .i18n import _
17 18 from .node import (
18 19 hex,
19 20 sha1nodeconstants,
20 21 short,
21 22 )
22 23 from .pycompat import getattr
23 24
24 25 from . import (
25 26 bookmarks,
26 27 bundlerepo,
27 28 cmdutil,
28 29 destutil,
29 30 discovery,
30 31 error,
31 32 exchange,
32 33 extensions,
33 34 graphmod,
34 35 httppeer,
35 36 localrepo,
36 37 lock,
37 38 logcmdutil,
38 39 logexchange,
39 40 merge as mergemod,
40 41 mergestate as mergestatemod,
41 42 narrowspec,
42 43 phases,
43 44 requirements,
44 45 scmutil,
45 46 sshpeer,
46 47 statichttprepo,
47 48 ui as uimod,
48 49 unionrepo,
49 50 url,
50 51 util,
51 52 verify as verifymod,
52 53 vfs as vfsmod,
53 54 )
54 55 from .interfaces import repository as repositorymod
55 56 from .utils import (
56 57 hashutil,
57 58 stringutil,
58 59 urlutil,
59 60 )
60 61
61 62
62 63 release = lock.release
63 64
64 65 # shared features
65 66 sharedbookmarks = b'bookmarks'
66 67
67 68
68 69 def _local(path):
69 70 path = util.expandpath(urlutil.urllocalpath(path))
70 71
71 72 try:
72 73 # we use os.stat() directly here instead of os.path.isfile()
73 74 # because the latter started returning `False` on invalid path
74 75 # exceptions starting in 3.8 and we care about handling
75 76 # invalid paths specially here.
76 77 st = os.stat(path)
77 78 isfile = stat.S_ISREG(st.st_mode)
78 79 # Python 2 raises TypeError, Python 3 ValueError.
79 80 except (TypeError, ValueError) as e:
80 81 raise error.Abort(
81 82 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
82 83 )
83 84 except OSError:
84 85 isfile = False
85 86
86 87 return isfile and bundlerepo or localrepo
87 88
88 89
89 90 def addbranchrevs(lrepo, other, branches, revs):
90 91 peer = other.peer() # a courtesy to callers using a localrepo for other
91 92 hashbranch, branches = branches
92 93 if not hashbranch and not branches:
93 94 x = revs or None
94 95 if revs:
95 96 y = revs[0]
96 97 else:
97 98 y = None
98 99 return x, y
99 100 if revs:
100 101 revs = list(revs)
101 102 else:
102 103 revs = []
103 104
104 105 if not peer.capable(b'branchmap'):
105 106 if branches:
106 107 raise error.Abort(_(b"remote branch lookup not supported"))
107 108 revs.append(hashbranch)
108 109 return revs, revs[0]
109 110
110 111 with peer.commandexecutor() as e:
111 112 branchmap = e.callcommand(b'branchmap', {}).result()
112 113
113 114 def primary(branch):
114 115 if branch == b'.':
115 116 if not lrepo:
116 117 raise error.Abort(_(b"dirstate branch not accessible"))
117 118 branch = lrepo.dirstate.branch()
118 119 if branch in branchmap:
119 120 revs.extend(hex(r) for r in reversed(branchmap[branch]))
120 121 return True
121 122 else:
122 123 return False
123 124
124 125 for branch in branches:
125 126 if not primary(branch):
126 127 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
127 128 if hashbranch:
128 129 if not primary(hashbranch):
129 130 revs.append(hashbranch)
130 131 return revs, revs[0]
131 132
132 133
133 134 def parseurl(path, branches=None):
134 135 '''parse url#branch, returning (url, (branch, branches))'''
135 136 msg = b'parseurl(...) moved to mercurial.utils.urlutil'
136 137 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
137 138 return urlutil.parseurl(path, branches=branches)
138 139
139 140
140 141 schemes = {
141 142 b'bundle': bundlerepo,
142 143 b'union': unionrepo,
143 144 b'file': _local,
144 145 b'http': httppeer,
145 146 b'https': httppeer,
146 147 b'ssh': sshpeer,
147 148 b'static-http': statichttprepo,
148 149 }
149 150
150 151
151 152 def _peerlookup(path):
152 153 u = urlutil.url(path)
153 154 scheme = u.scheme or b'file'
154 155 thing = schemes.get(scheme) or schemes[b'file']
155 156 try:
156 157 return thing(path)
157 158 except TypeError:
158 159 # we can't test callable(thing) because 'thing' can be an unloaded
159 160 # module that implements __call__
160 161 if not util.safehasattr(thing, b'instance'):
161 162 raise
162 163 return thing
163 164
164 165
165 166 def islocal(repo):
166 167 '''return true if repo (or path pointing to repo) is local'''
167 168 if isinstance(repo, bytes):
168 169 try:
169 170 return _peerlookup(repo).islocal(repo)
170 171 except AttributeError:
171 172 return False
172 173 return repo.local()
173 174
174 175
175 176 def openpath(ui, path, sendaccept=True):
176 177 '''open path with open if local, url.open if remote'''
177 178 pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
178 179 if pathurl.islocal():
179 180 return util.posixfile(pathurl.localpath(), b'rb')
180 181 else:
181 182 return url.open(ui, path, sendaccept=sendaccept)
182 183
183 184
184 185 # a list of (ui, repo) functions called for wire peer initialization
185 186 wirepeersetupfuncs = []
186 187
187 188
188 189 def _peerorrepo(
189 190 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
190 191 ):
191 192 """return a repository object for the specified path"""
192 193 obj = _peerlookup(path).instance(
193 194 ui, path, create, intents=intents, createopts=createopts
194 195 )
195 196 ui = getattr(obj, "ui", ui)
196 197 for f in presetupfuncs or []:
197 198 f(ui, obj)
198 199 ui.log(b'extension', b'- executing reposetup hooks\n')
199 200 with util.timedcm('all reposetup') as allreposetupstats:
200 201 for name, module in extensions.extensions(ui):
201 202 ui.log(b'extension', b' - running reposetup for %s\n', name)
202 203 hook = getattr(module, 'reposetup', None)
203 204 if hook:
204 205 with util.timedcm('reposetup %r', name) as stats:
205 206 hook(ui, obj)
206 207 ui.log(
207 208 b'extension', b' > reposetup for %s took %s\n', name, stats
208 209 )
209 210 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
210 211 if not obj.local():
211 212 for f in wirepeersetupfuncs:
212 213 f(ui, obj)
213 214 return obj
214 215
215 216
216 217 def repository(
217 218 ui,
218 219 path=b'',
219 220 create=False,
220 221 presetupfuncs=None,
221 222 intents=None,
222 223 createopts=None,
223 224 ):
224 225 """return a repository object for the specified path"""
225 226 peer = _peerorrepo(
226 227 ui,
227 228 path,
228 229 create,
229 230 presetupfuncs=presetupfuncs,
230 231 intents=intents,
231 232 createopts=createopts,
232 233 )
233 234 repo = peer.local()
234 235 if not repo:
235 236 raise error.Abort(
236 237 _(b"repository '%s' is not local") % (path or peer.url())
237 238 )
238 239 return repo.filtered(b'visible')
239 240
240 241
241 242 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
242 243 '''return a repository peer for the specified path'''
243 244 rui = remoteui(uiorrepo, opts)
244 245 return _peerorrepo(
245 246 rui, path, create, intents=intents, createopts=createopts
246 247 ).peer()
247 248
248 249
249 250 def defaultdest(source):
250 251 """return default destination of clone if none is given
251 252
252 253 >>> defaultdest(b'foo')
253 254 'foo'
254 255 >>> defaultdest(b'/foo/bar')
255 256 'bar'
256 257 >>> defaultdest(b'/')
257 258 ''
258 259 >>> defaultdest(b'')
259 260 ''
260 261 >>> defaultdest(b'http://example.org/')
261 262 ''
262 263 >>> defaultdest(b'http://example.org/foo/')
263 264 'foo'
264 265 """
265 266 path = urlutil.url(source).path
266 267 if not path:
267 268 return b''
268 269 return os.path.basename(os.path.normpath(path))
269 270
270 271
271 272 def sharedreposource(repo):
272 273 """Returns repository object for source repository of a shared repo.
273 274
274 275 If repo is not a shared repository, returns None.
275 276 """
276 277 if repo.sharedpath == repo.path:
277 278 return None
278 279
279 280 if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
280 281 return repo.srcrepo
281 282
282 283 # the sharedpath always ends in the .hg; we want the path to the repo
283 284 source = repo.vfs.split(repo.sharedpath)[0]
284 285 srcurl, branches = urlutil.parseurl(source)
285 286 srcrepo = repository(repo.ui, srcurl)
286 287 repo.srcrepo = srcrepo
287 288 return srcrepo
288 289
289 290
290 291 def share(
291 292 ui,
292 293 source,
293 294 dest=None,
294 295 update=True,
295 296 bookmarks=True,
296 297 defaultpath=None,
297 298 relative=False,
298 299 ):
299 300 '''create a shared repository'''
300 301
301 302 if not islocal(source):
302 303 raise error.Abort(_(b'can only share local repositories'))
303 304
304 305 if not dest:
305 306 dest = defaultdest(source)
306 307 else:
307 308 dest = urlutil.get_clone_path(ui, dest)[1]
308 309
309 310 if isinstance(source, bytes):
310 311 origsource, source, branches = urlutil.get_clone_path(ui, source)
311 312 srcrepo = repository(ui, source)
312 313 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
313 314 else:
314 315 srcrepo = source.local()
315 316 checkout = None
316 317
317 318 shareditems = set()
318 319 if bookmarks:
319 320 shareditems.add(sharedbookmarks)
320 321
321 322 r = repository(
322 323 ui,
323 324 dest,
324 325 create=True,
325 326 createopts={
326 327 b'sharedrepo': srcrepo,
327 328 b'sharedrelative': relative,
328 329 b'shareditems': shareditems,
329 330 },
330 331 )
331 332
332 333 postshare(srcrepo, r, defaultpath=defaultpath)
333 334 r = repository(ui, dest)
334 335 _postshareupdate(r, update, checkout=checkout)
335 336 return r
336 337
337 338
338 339 def _prependsourcehgrc(repo):
339 340 """copies the source repo config and prepend it in current repo .hg/hgrc
340 341 on unshare. This is only done if the share was perfomed using share safe
341 342 method where we share config of source in shares"""
342 343 srcvfs = vfsmod.vfs(repo.sharedpath)
343 344 dstvfs = vfsmod.vfs(repo.path)
344 345
345 346 if not srcvfs.exists(b'hgrc'):
346 347 return
347 348
348 349 currentconfig = b''
349 350 if dstvfs.exists(b'hgrc'):
350 351 currentconfig = dstvfs.read(b'hgrc')
351 352
352 353 with dstvfs(b'hgrc', b'wb') as fp:
353 354 sourceconfig = srcvfs.read(b'hgrc')
354 355 fp.write(b"# Config copied from shared source\n")
355 356 fp.write(sourceconfig)
356 357 fp.write(b'\n')
357 358 fp.write(currentconfig)
358 359
359 360
360 361 def unshare(ui, repo):
361 362 """convert a shared repository to a normal one
362 363
363 364 Copy the store data to the repo and remove the sharedpath data.
364 365
365 366 Returns a new repository object representing the unshared repository.
366 367
367 368 The passed repository object is not usable after this function is
368 369 called.
369 370 """
370 371
371 372 with repo.lock():
372 373 # we use locks here because if we race with commit, we
373 374 # can end up with extra data in the cloned revlogs that's
374 375 # not pointed to by changesets, thus causing verify to
375 376 # fail
376 377 destlock = copystore(ui, repo, repo.path)
377 378 with destlock or util.nullcontextmanager():
378 379 if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
379 380 # we were sharing .hg/hgrc of the share source with the current
380 381 # repo. We need to copy that while unsharing otherwise it can
381 382 # disable hooks and other checks
382 383 _prependsourcehgrc(repo)
383 384
384 385 sharefile = repo.vfs.join(b'sharedpath')
385 386 util.rename(sharefile, sharefile + b'.old')
386 387
387 388 repo.requirements.discard(requirements.SHARED_REQUIREMENT)
388 389 repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
389 390 scmutil.writereporequirements(repo)
390 391
391 392 # Removing share changes some fundamental properties of the repo instance.
392 393 # So we instantiate a new repo object and operate on it rather than
393 394 # try to keep the existing repo usable.
394 395 newrepo = repository(repo.baseui, repo.root, create=False)
395 396
396 397 # TODO: figure out how to access subrepos that exist, but were previously
397 398 # removed from .hgsub
398 399 c = newrepo[b'.']
399 400 subs = c.substate
400 401 for s in sorted(subs):
401 402 c.sub(s).unshare()
402 403
403 404 localrepo.poisonrepository(repo)
404 405
405 406 return newrepo
406 407
407 408
408 409 def postshare(sourcerepo, destrepo, defaultpath=None):
409 410 """Called after a new shared repo is created.
410 411
411 412 The new repo only has a requirements file and pointer to the source.
412 413 This function configures additional shared data.
413 414
414 415 Extensions can wrap this function and write additional entries to
415 416 destrepo/.hg/shared to indicate additional pieces of data to be shared.
416 417 """
417 418 default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
418 419 if default:
419 420 template = b'[paths]\ndefault = %s\n'
420 421 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
421 422 if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
422 423 with destrepo.wlock():
423 424 narrowspec.copytoworkingcopy(destrepo)
424 425
425 426
426 427 def _postshareupdate(repo, update, checkout=None):
427 428 """Maybe perform a working directory update after a shared repo is created.
428 429
429 430 ``update`` can be a boolean or a revision to update to.
430 431 """
431 432 if not update:
432 433 return
433 434
434 435 repo.ui.status(_(b"updating working directory\n"))
435 436 if update is not True:
436 437 checkout = update
437 438 for test in (checkout, b'default', b'tip'):
438 439 if test is None:
439 440 continue
440 441 try:
441 442 uprev = repo.lookup(test)
442 443 break
443 444 except error.RepoLookupError:
444 445 continue
445 446 _update(repo, uprev)
446 447
447 448
448 449 def copystore(ui, srcrepo, destpath):
449 450 """copy files from store of srcrepo in destpath
450 451
451 452 returns destlock
452 453 """
453 454 destlock = None
454 455 try:
455 456 hardlink = None
456 457 topic = _(b'linking') if hardlink else _(b'copying')
457 458 with ui.makeprogress(topic, unit=_(b'files')) as progress:
458 459 num = 0
459 460 srcpublishing = srcrepo.publishing()
460 461 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
461 462 dstvfs = vfsmod.vfs(destpath)
462 463 for f in srcrepo.store.copylist():
463 464 if srcpublishing and f.endswith(b'phaseroots'):
464 465 continue
465 466 dstbase = os.path.dirname(f)
466 467 if dstbase and not dstvfs.exists(dstbase):
467 468 dstvfs.mkdir(dstbase)
468 469 if srcvfs.exists(f):
469 470 if f.endswith(b'data'):
470 471 # 'dstbase' may be empty (e.g. revlog format 0)
471 472 lockfile = os.path.join(dstbase, b"lock")
472 473 # lock to avoid premature writing to the target
473 474 destlock = lock.lock(dstvfs, lockfile)
474 475 hardlink, n = util.copyfiles(
475 476 srcvfs.join(f), dstvfs.join(f), hardlink, progress
476 477 )
477 478 num += n
478 479 if hardlink:
479 480 ui.debug(b"linked %d files\n" % num)
480 481 else:
481 482 ui.debug(b"copied %d files\n" % num)
482 483 return destlock
483 484 except: # re-raises
484 485 release(destlock)
485 486 raise
486 487
487 488
488 489 def clonewithshare(
489 490 ui,
490 491 peeropts,
491 492 sharepath,
492 493 source,
493 494 srcpeer,
494 495 dest,
495 496 pull=False,
496 497 rev=None,
497 498 update=True,
498 499 stream=False,
499 500 ):
500 501 """Perform a clone using a shared repo.
501 502
502 503 The store for the repository will be located at <sharepath>/.hg. The
503 504 specified revisions will be cloned or pulled from "source". A shared repo
504 505 will be created at "dest" and a working copy will be created if "update" is
505 506 True.
506 507 """
507 508 revs = None
508 509 if rev:
509 510 if not srcpeer.capable(b'lookup'):
510 511 raise error.Abort(
511 512 _(
512 513 b"src repository does not support "
513 514 b"revision lookup and so doesn't "
514 515 b"support clone by revision"
515 516 )
516 517 )
517 518
518 519 # TODO this is batchable.
519 520 remoterevs = []
520 521 for r in rev:
521 522 with srcpeer.commandexecutor() as e:
522 523 remoterevs.append(
523 524 e.callcommand(
524 525 b'lookup',
525 526 {
526 527 b'key': r,
527 528 },
528 529 ).result()
529 530 )
530 531 revs = remoterevs
531 532
532 533 # Obtain a lock before checking for or cloning the pooled repo otherwise
533 534 # 2 clients may race creating or populating it.
534 535 pooldir = os.path.dirname(sharepath)
535 536 # lock class requires the directory to exist.
536 537 try:
537 538 util.makedir(pooldir, False)
538 539 except OSError as e:
539 540 if e.errno != errno.EEXIST:
540 541 raise
541 542
542 543 poolvfs = vfsmod.vfs(pooldir)
543 544 basename = os.path.basename(sharepath)
544 545
545 546 with lock.lock(poolvfs, b'%s.lock' % basename):
546 547 if os.path.exists(sharepath):
547 548 ui.status(
548 549 _(b'(sharing from existing pooled repository %s)\n') % basename
549 550 )
550 551 else:
551 552 ui.status(
552 553 _(b'(sharing from new pooled repository %s)\n') % basename
553 554 )
554 555 # Always use pull mode because hardlinks in share mode don't work
555 556 # well. Never update because working copies aren't necessary in
556 557 # share mode.
557 558 clone(
558 559 ui,
559 560 peeropts,
560 561 source,
561 562 dest=sharepath,
562 563 pull=True,
563 564 revs=rev,
564 565 update=False,
565 566 stream=stream,
566 567 )
567 568
568 569 # Resolve the value to put in [paths] section for the source.
569 570 if islocal(source):
570 571 defaultpath = util.abspath(urlutil.urllocalpath(source))
571 572 else:
572 573 defaultpath = source
573 574
574 575 sharerepo = repository(ui, path=sharepath)
575 576 destrepo = share(
576 577 ui,
577 578 sharerepo,
578 579 dest=dest,
579 580 update=False,
580 581 bookmarks=False,
581 582 defaultpath=defaultpath,
582 583 )
583 584
584 585 # We need to perform a pull against the dest repo to fetch bookmarks
585 586 # and other non-store data that isn't shared by default. In the case of
586 587 # a non-existing shared repo, this means we pull from the remote twice. This
587 588 # is a bit weird. But at the time it was implemented, there wasn't an easy
588 589 # way to pull just non-changegroup data.
589 590 exchange.pull(destrepo, srcpeer, heads=revs)
590 591
591 592 _postshareupdate(destrepo, update)
592 593
593 594 return srcpeer, peer(ui, peeropts, dest)
594 595
595 596
596 597 # Recomputing caches is often slow on big repos, so copy them.
597 598 def _copycache(srcrepo, dstcachedir, fname):
598 599 """copy a cache from srcrepo to destcachedir (if it exists)"""
599 600 srcfname = srcrepo.cachevfs.join(fname)
600 601 dstfname = os.path.join(dstcachedir, fname)
601 602 if os.path.exists(srcfname):
602 603 if not os.path.exists(dstcachedir):
603 604 os.mkdir(dstcachedir)
604 605 util.copyfile(srcfname, dstfname)
605 606
606 607
607 608 def clone(
608 609 ui,
609 610 peeropts,
610 611 source,
611 612 dest=None,
612 613 pull=False,
613 614 revs=None,
614 615 update=True,
615 616 stream=False,
616 617 branch=None,
617 618 shareopts=None,
618 619 storeincludepats=None,
619 620 storeexcludepats=None,
620 621 depth=None,
621 622 ):
622 623 """Make a copy of an existing repository.
623 624
624 625 Create a copy of an existing repository in a new directory. The
625 626 source and destination are URLs, as passed to the repository
626 627 function. Returns a pair of repository peers, the source and
627 628 newly created destination.
628 629
629 630 The location of the source is added to the new repository's
630 631 .hg/hgrc file, as the default to be used for future pulls and
631 632 pushes.
632 633
633 634 If an exception is raised, the partly cloned/updated destination
634 635 repository will be deleted.
635 636
636 637 Arguments:
637 638
638 639 source: repository object or URL
639 640
640 641 dest: URL of destination repository to create (defaults to base
641 642 name of source repository)
642 643
643 644 pull: always pull from source repository, even in local case or if the
644 645 server prefers streaming
645 646
646 647 stream: stream raw data uncompressed from repository (fast over
647 648 LAN, slow over WAN)
648 649
649 650 revs: revision to clone up to (implies pull=True)
650 651
651 652 update: update working directory after clone completes, if
652 653 destination is local repository (True means update to default rev,
653 654 anything else is treated as a revision)
654 655
655 656 branch: branches to clone
656 657
657 658 shareopts: dict of options to control auto sharing behavior. The "pool" key
658 659 activates auto sharing mode and defines the directory for stores. The
659 660 "mode" key determines how to construct the directory name of the shared
660 661 repository. "identity" means the name is derived from the node of the first
661 662 changeset in the repository. "remote" means the name is derived from the
662 663 remote's path/URL. Defaults to "identity."
663 664
664 665 storeincludepats and storeexcludepats: sets of file patterns to include and
665 666 exclude in the repository copy, respectively. If not defined, all files
666 667 will be included (a "full" clone). Otherwise a "narrow" clone containing
667 668 only the requested files will be performed. If ``storeincludepats`` is not
668 669 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
669 670 ``path:.``. If both are empty sets, no files will be cloned.
670 671 """
671 672
672 673 if isinstance(source, bytes):
673 674 src = urlutil.get_clone_path(ui, source, branch)
674 675 origsource, source, branches = src
675 676 srcpeer = peer(ui, peeropts, source)
676 677 else:
677 678 srcpeer = source.peer() # in case we were called with a localrepo
678 679 branches = (None, branch or [])
679 680 origsource = source = srcpeer.url()
680 srclock = destlock = cleandir = None
681 srclock = destlock = destwlock = cleandir = None
681 682 destpeer = None
682 683 try:
683 684 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
684 685
685 686 if dest is None:
686 687 dest = defaultdest(source)
687 688 if dest:
688 689 ui.status(_(b"destination directory: %s\n") % dest)
689 690 else:
690 691 dest = urlutil.get_clone_path(ui, dest)[0]
691 692
692 693 dest = urlutil.urllocalpath(dest)
693 694 source = urlutil.urllocalpath(source)
694 695
695 696 if not dest:
696 697 raise error.InputError(_(b"empty destination path is not valid"))
697 698
698 699 destvfs = vfsmod.vfs(dest, expandpath=True)
699 700 if destvfs.lexists():
700 701 if not destvfs.isdir():
701 702 raise error.InputError(
702 703 _(b"destination '%s' already exists") % dest
703 704 )
704 705 elif destvfs.listdir():
705 706 raise error.InputError(
706 707 _(b"destination '%s' is not empty") % dest
707 708 )
708 709
709 710 createopts = {}
710 711 narrow = False
711 712
712 713 if storeincludepats is not None:
713 714 narrowspec.validatepatterns(storeincludepats)
714 715 narrow = True
715 716
716 717 if storeexcludepats is not None:
717 718 narrowspec.validatepatterns(storeexcludepats)
718 719 narrow = True
719 720
720 721 if narrow:
721 722 # Include everything by default if only exclusion patterns are defined.
722 723 if storeexcludepats and not storeincludepats:
723 724 storeincludepats = {b'path:.'}
724 725
725 726 createopts[b'narrowfiles'] = True
726 727
727 728 if depth:
728 729 createopts[b'shallowfilestore'] = True
729 730
730 731 if srcpeer.capable(b'lfs-serve'):
731 732 # Repository creation honors the config if it disabled the extension, so
732 733 # we can't just announce that lfs will be enabled. This check avoids
733 734 # saying that lfs will be enabled, and then saying it's an unknown
734 735 # feature. The lfs creation option is set in either case so that a
735 736 # requirement is added. If the extension is explicitly disabled but the
736 737 # requirement is set, the clone aborts early, before transferring any
737 738 # data.
738 739 createopts[b'lfs'] = True
739 740
740 741 if extensions.disabled_help(b'lfs'):
741 742 ui.status(
742 743 _(
743 744 b'(remote is using large file support (lfs), but it is '
744 745 b'explicitly disabled in the local configuration)\n'
745 746 )
746 747 )
747 748 else:
748 749 ui.status(
749 750 _(
750 751 b'(remote is using large file support (lfs); lfs will '
751 752 b'be enabled for this repository)\n'
752 753 )
753 754 )
754 755
755 756 shareopts = shareopts or {}
756 757 sharepool = shareopts.get(b'pool')
757 758 sharenamemode = shareopts.get(b'mode')
758 759 if sharepool and islocal(dest):
759 760 sharepath = None
760 761 if sharenamemode == b'identity':
761 762 # Resolve the name from the initial changeset in the remote
762 763 # repository. This returns nullid when the remote is empty. It
763 764 # raises RepoLookupError if revision 0 is filtered or otherwise
764 765 # not available. If we fail to resolve, sharing is not enabled.
765 766 try:
766 767 with srcpeer.commandexecutor() as e:
767 768 rootnode = e.callcommand(
768 769 b'lookup',
769 770 {
770 771 b'key': b'0',
771 772 },
772 773 ).result()
773 774
774 775 if rootnode != sha1nodeconstants.nullid:
775 776 sharepath = os.path.join(sharepool, hex(rootnode))
776 777 else:
777 778 ui.status(
778 779 _(
779 780 b'(not using pooled storage: '
780 781 b'remote appears to be empty)\n'
781 782 )
782 783 )
783 784 except error.RepoLookupError:
784 785 ui.status(
785 786 _(
786 787 b'(not using pooled storage: '
787 788 b'unable to resolve identity of remote)\n'
788 789 )
789 790 )
790 791 elif sharenamemode == b'remote':
791 792 sharepath = os.path.join(
792 793 sharepool, hex(hashutil.sha1(source).digest())
793 794 )
794 795 else:
795 796 raise error.Abort(
796 797 _(b'unknown share naming mode: %s') % sharenamemode
797 798 )
798 799
799 800 # TODO this is a somewhat arbitrary restriction.
800 801 if narrow:
801 802 ui.status(
802 803 _(b'(pooled storage not supported for narrow clones)\n')
803 804 )
804 805 sharepath = None
805 806
806 807 if sharepath:
807 808 return clonewithshare(
808 809 ui,
809 810 peeropts,
810 811 sharepath,
811 812 source,
812 813 srcpeer,
813 814 dest,
814 815 pull=pull,
815 816 rev=revs,
816 817 update=update,
817 818 stream=stream,
818 819 )
819 820
820 821 srcrepo = srcpeer.local()
821 822
822 823 abspath = origsource
823 824 if islocal(origsource):
824 825 abspath = util.abspath(urlutil.urllocalpath(origsource))
825 826
826 827 if islocal(dest):
827 828 if os.path.exists(dest):
828 829 # only clean up directories we create ourselves
829 830 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
830 831 cleandir = hgdir
831 832 else:
832 833 cleandir = dest
833 834
834 835 copy = False
835 836 if (
836 837 srcrepo
837 838 and srcrepo.cancopy()
838 839 and islocal(dest)
839 840 and not phases.hassecret(srcrepo)
840 841 ):
841 842 copy = not pull and not revs
842 843
843 844 # TODO this is a somewhat arbitrary restriction.
844 845 if narrow:
845 846 copy = False
846 847
847 848 if copy:
848 849 try:
849 850 # we use a lock here because if we race with commit, we
850 851 # can end up with extra data in the cloned revlogs that's
851 852 # not pointed to by changesets, thus causing verify to
852 853 # fail
853 854 srclock = srcrepo.lock(wait=False)
854 855 except error.LockError:
855 856 copy = False
856 857
857 858 if copy:
858 859 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
859 860
860 861 destrootpath = urlutil.urllocalpath(dest)
861 862 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
862 863 localrepo.createrepository(
863 864 ui,
864 865 destrootpath,
865 866 requirements=dest_reqs,
866 867 )
867 868 destrepo = localrepo.makelocalrepository(ui, destrootpath)
869
870 destwlock = destrepo.wlock()
868 871 destlock = destrepo.lock()
869 872 from . import streamclone # avoid cycle
870 873
871 874 streamclone.local_copy(srcrepo, destrepo)
872 875
873 876 # we need to re-init the repo after manually copying the data
874 877 # into it
875 878 destpeer = peer(srcrepo, peeropts, dest)
879
880 # make the peer aware that it is already locked
881 #
882 # important:
883 #
884 # We still need to release those locks at the end of the function
885 destpeer.local()._lockref = weakref.ref(destlock)
886 destpeer.local()._wlockref = weakref.ref(destwlock)
887 # dirstate also needs to be copied because `_wlockref` has a reference
888 # to it: this dirstate is saved to disk when the wlock is released
889 destpeer.local().dirstate = destrepo.dirstate
890
876 891 srcrepo.hook(
877 892 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
878 893 )
879 894 else:
880 895 try:
881 896 # only pass ui when no srcrepo
882 897 destpeer = peer(
883 898 srcrepo or ui,
884 899 peeropts,
885 900 dest,
886 901 create=True,
887 902 createopts=createopts,
888 903 )
889 904 except OSError as inst:
890 905 if inst.errno == errno.EEXIST:
891 906 cleandir = None
892 907 raise error.Abort(
893 908 _(b"destination '%s' already exists") % dest
894 909 )
895 910 raise
896 911
897 912 if revs:
898 913 if not srcpeer.capable(b'lookup'):
899 914 raise error.Abort(
900 915 _(
901 916 b"src repository does not support "
902 917 b"revision lookup and so doesn't "
903 918 b"support clone by revision"
904 919 )
905 920 )
906 921
907 922 # TODO this is batchable.
908 923 remoterevs = []
909 924 for rev in revs:
910 925 with srcpeer.commandexecutor() as e:
911 926 remoterevs.append(
912 927 e.callcommand(
913 928 b'lookup',
914 929 {
915 930 b'key': rev,
916 931 },
917 932 ).result()
918 933 )
919 934 revs = remoterevs
920 935
921 936 checkout = revs[0]
922 937 else:
923 938 revs = None
924 939 local = destpeer.local()
925 940 if local:
926 941 if narrow:
927 942 with local.wlock(), local.lock():
928 943 local.setnarrowpats(storeincludepats, storeexcludepats)
929 944 narrowspec.copytoworkingcopy(local)
930 945
931 946 u = urlutil.url(abspath)
932 947 defaulturl = bytes(u)
933 948 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
934 949 if not stream:
935 950 if pull:
936 951 stream = False
937 952 else:
938 953 stream = None
939 954 # internal config: ui.quietbookmarkmove
940 955 overrides = {(b'ui', b'quietbookmarkmove'): True}
941 956 with local.ui.configoverride(overrides, b'clone'):
942 957 exchange.pull(
943 958 local,
944 959 srcpeer,
945 960 heads=revs,
946 961 streamclonerequested=stream,
947 962 includepats=storeincludepats,
948 963 excludepats=storeexcludepats,
949 964 depth=depth,
950 965 )
951 966 elif srcrepo:
952 967 # TODO lift restriction once exchange.push() accepts narrow
953 968 # push.
954 969 if narrow:
955 970 raise error.Abort(
956 971 _(
957 972 b'narrow clone not available for '
958 973 b'remote destinations'
959 974 )
960 975 )
961 976
962 977 exchange.push(
963 978 srcrepo,
964 979 destpeer,
965 980 revs=revs,
966 981 bookmarks=srcrepo._bookmarks.keys(),
967 982 )
968 983 else:
969 984 raise error.Abort(
970 985 _(b"clone from remote to remote not supported")
971 986 )
972 987
973 988 cleandir = None
974 989
975 990 destrepo = destpeer.local()
976 991 if destrepo:
977 992 template = uimod.samplehgrcs[b'cloned']
978 993 u = urlutil.url(abspath)
979 994 u.passwd = None
980 995 defaulturl = bytes(u)
981 996 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
982 997 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
983 998
984 999 if ui.configbool(b'experimental', b'remotenames'):
985 1000 logexchange.pullremotenames(destrepo, srcpeer)
986 1001
987 1002 if update:
988 1003 if update is not True:
989 1004 with srcpeer.commandexecutor() as e:
990 1005 checkout = e.callcommand(
991 1006 b'lookup',
992 1007 {
993 1008 b'key': update,
994 1009 },
995 1010 ).result()
996 1011
997 1012 uprev = None
998 1013 status = None
999 1014 if checkout is not None:
1000 1015 # Some extensions (at least hg-git and hg-subversion) have
1001 1016 # a peer.lookup() implementation that returns a name instead
1002 1017 # of a nodeid. We work around it here until we've figured
1003 1018 # out a better solution.
1004 1019 if len(checkout) == 20 and checkout in destrepo:
1005 1020 uprev = checkout
1006 1021 elif scmutil.isrevsymbol(destrepo, checkout):
1007 1022 uprev = scmutil.revsymbol(destrepo, checkout).node()
1008 1023 else:
1009 1024 if update is not True:
1010 1025 try:
1011 1026 uprev = destrepo.lookup(update)
1012 1027 except error.RepoLookupError:
1013 1028 pass
1014 1029 if uprev is None:
1015 1030 try:
1016 1031 if destrepo._activebookmark:
1017 1032 uprev = destrepo.lookup(destrepo._activebookmark)
1018 1033 update = destrepo._activebookmark
1019 1034 else:
1020 1035 uprev = destrepo._bookmarks[b'@']
1021 1036 update = b'@'
1022 1037 bn = destrepo[uprev].branch()
1023 1038 if bn == b'default':
1024 1039 status = _(b"updating to bookmark %s\n" % update)
1025 1040 else:
1026 1041 status = (
1027 1042 _(b"updating to bookmark %s on branch %s\n")
1028 1043 ) % (update, bn)
1029 1044 except KeyError:
1030 1045 try:
1031 1046 uprev = destrepo.branchtip(b'default')
1032 1047 except error.RepoLookupError:
1033 1048 uprev = destrepo.lookup(b'tip')
1034 1049 if not status:
1035 1050 bn = destrepo[uprev].branch()
1036 1051 status = _(b"updating to branch %s\n") % bn
1037 1052 destrepo.ui.status(status)
1038 1053 _update(destrepo, uprev)
1039 1054 if update in destrepo._bookmarks:
1040 1055 bookmarks.activate(destrepo, update)
1041 1056 if destlock is not None:
1042 1057 release(destlock)
1058 if destwlock is not None:
1059 release(destwlock)
1043 1060 # here is a tiny window where someone could end up writing to the
1044 1061 # repository before the caches are sure to be warm. This is "fine"
1045 1062 # as the only "bad" outcome would be some slowness. That potential
1046 1063 # slowness already affects readers.
1047 1064 with destrepo.lock():
1048 1065 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1049 1066 finally:
1050 release(srclock, destlock)
1067 release(srclock, destlock, destwlock)
1051 1068 if cleandir is not None:
1052 1069 shutil.rmtree(cleandir, True)
1053 1070 if srcpeer is not None:
1054 1071 srcpeer.close()
1055 1072 if destpeer and destpeer.local() is None:
1056 1073 destpeer.close()
1057 1074 return srcpeer, destpeer
1058 1075
1059 1076
1060 1077 def _showstats(repo, stats, quietempty=False):
1061 1078 if quietempty and stats.isempty():
1062 1079 return
1063 1080 repo.ui.status(
1064 1081 _(
1065 1082 b"%d files updated, %d files merged, "
1066 1083 b"%d files removed, %d files unresolved\n"
1067 1084 )
1068 1085 % (
1069 1086 stats.updatedcount,
1070 1087 stats.mergedcount,
1071 1088 stats.removedcount,
1072 1089 stats.unresolvedcount,
1073 1090 )
1074 1091 )
1075 1092
1076 1093
1077 1094 def updaterepo(repo, node, overwrite, updatecheck=None):
1078 1095 """Update the working directory to node.
1079 1096
1080 1097 When overwrite is set, changes are clobbered, merged else
1081 1098
1082 1099 returns stats (see pydoc mercurial.merge.applyupdates)"""
1083 1100 repo.ui.deprecwarn(
1084 1101 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1085 1102 b'5.7',
1086 1103 )
1087 1104 return mergemod._update(
1088 1105 repo,
1089 1106 node,
1090 1107 branchmerge=False,
1091 1108 force=overwrite,
1092 1109 labels=[b'working copy', b'destination'],
1093 1110 updatecheck=updatecheck,
1094 1111 )
1095 1112
1096 1113
1097 1114 def update(repo, node, quietempty=False, updatecheck=None):
1098 1115 """update the working directory to node"""
1099 1116 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1100 1117 _showstats(repo, stats, quietempty)
1101 1118 if stats.unresolvedcount:
1102 1119 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1103 1120 return stats.unresolvedcount > 0
1104 1121
1105 1122
1106 1123 # naming conflict in clone()
1107 1124 _update = update
1108 1125
1109 1126
1110 1127 def clean(repo, node, show_stats=True, quietempty=False):
1111 1128 """forcibly switch the working directory to node, clobbering changes"""
1112 1129 stats = mergemod.clean_update(repo[node])
1113 1130 assert stats.unresolvedcount == 0
1114 1131 if show_stats:
1115 1132 _showstats(repo, stats, quietempty)
1116 1133 return False
1117 1134
1118 1135
1119 1136 # naming conflict in updatetotally()
1120 1137 _clean = clean
1121 1138
1122 1139 _VALID_UPDATECHECKS = {
1123 1140 mergemod.UPDATECHECK_ABORT,
1124 1141 mergemod.UPDATECHECK_NONE,
1125 1142 mergemod.UPDATECHECK_LINEAR,
1126 1143 mergemod.UPDATECHECK_NO_CONFLICT,
1127 1144 }
1128 1145
1129 1146
1130 1147 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1131 1148 """Update the working directory with extra care for non-file components
1132 1149
1133 1150 This takes care of non-file components below:
1134 1151
1135 1152 :bookmark: might be advanced or (in)activated
1136 1153
1137 1154 This takes arguments below:
1138 1155
1139 1156 :checkout: to which revision the working directory is updated
1140 1157 :brev: a name, which might be a bookmark to be activated after updating
1141 1158 :clean: whether changes in the working directory can be discarded
1142 1159 :updatecheck: how to deal with a dirty working directory
1143 1160
1144 1161 Valid values for updatecheck are the UPDATECHECK_* constants
1145 1162 defined in the merge module. Passing `None` will result in using the
1146 1163 configured default.
1147 1164
1148 1165 * ABORT: abort if the working directory is dirty
1149 1166 * NONE: don't check (merge working directory changes into destination)
1150 1167 * LINEAR: check that update is linear before merging working directory
1151 1168 changes into destination
1152 1169 * NO_CONFLICT: check that the update does not result in file merges
1153 1170
1154 1171 This returns whether conflict is detected at updating or not.
1155 1172 """
1156 1173 if updatecheck is None:
1157 1174 updatecheck = ui.config(b'commands', b'update.check')
1158 1175 if updatecheck not in _VALID_UPDATECHECKS:
1159 1176 # If not configured, or invalid value configured
1160 1177 updatecheck = mergemod.UPDATECHECK_LINEAR
1161 1178 if updatecheck not in _VALID_UPDATECHECKS:
1162 1179 raise ValueError(
1163 1180 r'Invalid updatecheck value %r (can accept %r)'
1164 1181 % (updatecheck, _VALID_UPDATECHECKS)
1165 1182 )
1166 1183 with repo.wlock():
1167 1184 movemarkfrom = None
1168 1185 warndest = False
1169 1186 if checkout is None:
1170 1187 updata = destutil.destupdate(repo, clean=clean)
1171 1188 checkout, movemarkfrom, brev = updata
1172 1189 warndest = True
1173 1190
1174 1191 if clean:
1175 1192 ret = _clean(repo, checkout)
1176 1193 else:
1177 1194 if updatecheck == mergemod.UPDATECHECK_ABORT:
1178 1195 cmdutil.bailifchanged(repo, merge=False)
1179 1196 updatecheck = mergemod.UPDATECHECK_NONE
1180 1197 ret = _update(repo, checkout, updatecheck=updatecheck)
1181 1198
1182 1199 if not ret and movemarkfrom:
1183 1200 if movemarkfrom == repo[b'.'].node():
1184 1201 pass # no-op update
1185 1202 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1186 1203 b = ui.label(repo._activebookmark, b'bookmarks.active')
1187 1204 ui.status(_(b"updating bookmark %s\n") % b)
1188 1205 else:
1189 1206 # this can happen with a non-linear update
1190 1207 b = ui.label(repo._activebookmark, b'bookmarks')
1191 1208 ui.status(_(b"(leaving bookmark %s)\n") % b)
1192 1209 bookmarks.deactivate(repo)
1193 1210 elif brev in repo._bookmarks:
1194 1211 if brev != repo._activebookmark:
1195 1212 b = ui.label(brev, b'bookmarks.active')
1196 1213 ui.status(_(b"(activating bookmark %s)\n") % b)
1197 1214 bookmarks.activate(repo, brev)
1198 1215 elif brev:
1199 1216 if repo._activebookmark:
1200 1217 b = ui.label(repo._activebookmark, b'bookmarks')
1201 1218 ui.status(_(b"(leaving bookmark %s)\n") % b)
1202 1219 bookmarks.deactivate(repo)
1203 1220
1204 1221 if warndest:
1205 1222 destutil.statusotherdests(ui, repo)
1206 1223
1207 1224 return ret
1208 1225
1209 1226
1210 1227 def merge(
1211 1228 ctx,
1212 1229 force=False,
1213 1230 remind=True,
1214 1231 labels=None,
1215 1232 ):
1216 1233 """Branch merge with node, resolving changes. Return true if any
1217 1234 unresolved conflicts."""
1218 1235 repo = ctx.repo()
1219 1236 stats = mergemod.merge(ctx, force=force, labels=labels)
1220 1237 _showstats(repo, stats)
1221 1238 if stats.unresolvedcount:
1222 1239 repo.ui.status(
1223 1240 _(
1224 1241 b"use 'hg resolve' to retry unresolved file merges "
1225 1242 b"or 'hg merge --abort' to abandon\n"
1226 1243 )
1227 1244 )
1228 1245 elif remind:
1229 1246 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1230 1247 return stats.unresolvedcount > 0
1231 1248
1232 1249
1233 1250 def abortmerge(ui, repo):
1234 1251 ms = mergestatemod.mergestate.read(repo)
1235 1252 if ms.active():
1236 1253 # there were conflicts
1237 1254 node = ms.localctx.hex()
1238 1255 else:
1239 1256 # there were no conflicts, mergestate was not stored
1240 1257 node = repo[b'.'].hex()
1241 1258
1242 1259 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1243 1260 stats = mergemod.clean_update(repo[node])
1244 1261 assert stats.unresolvedcount == 0
1245 1262 _showstats(repo, stats)
1246 1263
1247 1264
1248 1265 def _incoming(
1249 1266 displaychlist,
1250 1267 subreporecurse,
1251 1268 ui,
1252 1269 repo,
1253 1270 source,
1254 1271 opts,
1255 1272 buffered=False,
1256 1273 subpath=None,
1257 1274 ):
1258 1275 """
1259 1276 Helper for incoming / gincoming.
1260 1277 displaychlist gets called with
1261 1278 (remoterepo, incomingchangesetlist, displayer) parameters,
1262 1279 and is supposed to contain only code that can't be unified.
1263 1280 """
1264 1281 srcs = urlutil.get_pull_paths(repo, ui, [source])
1265 1282 srcs = list(srcs)
1266 1283 if len(srcs) != 1:
1267 1284 msg = _(b'for now, incoming supports only a single source, %d provided')
1268 1285 msg %= len(srcs)
1269 1286 raise error.Abort(msg)
1270 1287 path = srcs[0]
1271 1288 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
1272 1289 if subpath is not None:
1273 1290 subpath = urlutil.url(subpath)
1274 1291 if subpath.isabs():
1275 1292 source = bytes(subpath)
1276 1293 else:
1277 1294 p = urlutil.url(source)
1278 1295 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1279 1296 source = bytes(p)
1280 1297 other = peer(repo, opts, source)
1281 1298 cleanupfn = other.close
1282 1299 try:
1283 1300 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1284 1301 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1285 1302
1286 1303 if revs:
1287 1304 revs = [other.lookup(rev) for rev in revs]
1288 1305 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1289 1306 ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
1290 1307 )
1291 1308
1292 1309 if not chlist:
1293 1310 ui.status(_(b"no changes found\n"))
1294 1311 return subreporecurse()
1295 1312 ui.pager(b'incoming')
1296 1313 displayer = logcmdutil.changesetdisplayer(
1297 1314 ui, other, opts, buffered=buffered
1298 1315 )
1299 1316 displaychlist(other, chlist, displayer)
1300 1317 displayer.close()
1301 1318 finally:
1302 1319 cleanupfn()
1303 1320 subreporecurse()
1304 1321 return 0 # exit code is zero since we found incoming changes
1305 1322
1306 1323
1307 1324 def incoming(ui, repo, source, opts, subpath=None):
1308 1325 def subreporecurse():
1309 1326 ret = 1
1310 1327 if opts.get(b'subrepos'):
1311 1328 ctx = repo[None]
1312 1329 for subpath in sorted(ctx.substate):
1313 1330 sub = ctx.sub(subpath)
1314 1331 ret = min(ret, sub.incoming(ui, source, opts))
1315 1332 return ret
1316 1333
1317 1334 def display(other, chlist, displayer):
1318 1335 limit = logcmdutil.getlimit(opts)
1319 1336 if opts.get(b'newest_first'):
1320 1337 chlist.reverse()
1321 1338 count = 0
1322 1339 for n in chlist:
1323 1340 if limit is not None and count >= limit:
1324 1341 break
1325 1342 parents = [
1326 1343 p for p in other.changelog.parents(n) if p != repo.nullid
1327 1344 ]
1328 1345 if opts.get(b'no_merges') and len(parents) == 2:
1329 1346 continue
1330 1347 count += 1
1331 1348 displayer.show(other[n])
1332 1349
1333 1350 return _incoming(
1334 1351 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1335 1352 )
1336 1353
1337 1354
1338 1355 def _outgoing(ui, repo, dests, opts, subpath=None):
1339 1356 out = set()
1340 1357 others = []
1341 1358 for path in urlutil.get_push_paths(repo, ui, dests):
1342 1359 dest = path.pushloc or path.loc
1343 1360 if subpath is not None:
1344 1361 subpath = urlutil.url(subpath)
1345 1362 if subpath.isabs():
1346 1363 dest = bytes(subpath)
1347 1364 else:
1348 1365 p = urlutil.url(dest)
1349 1366 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1350 1367 dest = bytes(p)
1351 1368 branches = path.branch, opts.get(b'branch') or []
1352 1369
1353 1370 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1354 1371 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1355 1372 if revs:
1356 1373 revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]
1357 1374
1358 1375 other = peer(repo, opts, dest)
1359 1376 try:
1360 1377 outgoing = discovery.findcommonoutgoing(
1361 1378 repo, other, revs, force=opts.get(b'force')
1362 1379 )
1363 1380 o = outgoing.missing
1364 1381 out.update(o)
1365 1382 if not o:
1366 1383 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1367 1384 others.append(other)
1368 1385 except: # re-raises
1369 1386 other.close()
1370 1387 raise
1371 1388 # make sure this is ordered by revision number
1372 1389 outgoing_revs = list(out)
1373 1390 cl = repo.changelog
1374 1391 outgoing_revs.sort(key=cl.rev)
1375 1392 return outgoing_revs, others
1376 1393
1377 1394
1378 1395 def _outgoing_recurse(ui, repo, dests, opts):
1379 1396 ret = 1
1380 1397 if opts.get(b'subrepos'):
1381 1398 ctx = repo[None]
1382 1399 for subpath in sorted(ctx.substate):
1383 1400 sub = ctx.sub(subpath)
1384 1401 ret = min(ret, sub.outgoing(ui, dests, opts))
1385 1402 return ret
1386 1403
1387 1404
1388 1405 def _outgoing_filter(repo, revs, opts):
1389 1406 """apply revision filtering/ordering option for outgoing"""
1390 1407 limit = logcmdutil.getlimit(opts)
1391 1408 no_merges = opts.get(b'no_merges')
1392 1409 if opts.get(b'newest_first'):
1393 1410 revs.reverse()
1394 1411 if limit is None and not no_merges:
1395 1412 for r in revs:
1396 1413 yield r
1397 1414 return
1398 1415
1399 1416 count = 0
1400 1417 cl = repo.changelog
1401 1418 for n in revs:
1402 1419 if limit is not None and count >= limit:
1403 1420 break
1404 1421 parents = [p for p in cl.parents(n) if p != repo.nullid]
1405 1422 if no_merges and len(parents) == 2:
1406 1423 continue
1407 1424 count += 1
1408 1425 yield n
1409 1426
1410 1427
1411 1428 def outgoing(ui, repo, dests, opts, subpath=None):
1412 1429 if opts.get(b'graph'):
1413 1430 logcmdutil.checkunsupportedgraphflags([], opts)
1414 1431 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1415 1432 ret = 1
1416 1433 try:
1417 1434 if o:
1418 1435 ret = 0
1419 1436
1420 1437 if opts.get(b'graph'):
1421 1438 revdag = logcmdutil.graphrevs(repo, o, opts)
1422 1439 ui.pager(b'outgoing')
1423 1440 displayer = logcmdutil.changesetdisplayer(
1424 1441 ui, repo, opts, buffered=True
1425 1442 )
1426 1443 logcmdutil.displaygraph(
1427 1444 ui, repo, revdag, displayer, graphmod.asciiedges
1428 1445 )
1429 1446 else:
1430 1447 ui.pager(b'outgoing')
1431 1448 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1432 1449 for n in _outgoing_filter(repo, o, opts):
1433 1450 displayer.show(repo[n])
1434 1451 displayer.close()
1435 1452 for oth in others:
1436 1453 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1437 1454 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1438 1455 return ret # exit code is zero if we found outgoing changes
1439 1456 finally:
1440 1457 for oth in others:
1441 1458 oth.close()
1442 1459
1443 1460
1444 1461 def verify(repo, level=None):
1445 1462 """verify the consistency of a repository"""
1446 1463 ret = verifymod.verify(repo, level=level)
1447 1464
1448 1465 # Broken subrepo references in hidden csets don't seem worth worrying about,
1449 1466 # since they can't be pushed/pulled, and --hidden can be used if they are a
1450 1467 # concern.
1451 1468
1452 1469 # pathto() is needed for -R case
1453 1470 revs = repo.revs(
1454 1471 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1455 1472 )
1456 1473
1457 1474 if revs:
1458 1475 repo.ui.status(_(b'checking subrepo links\n'))
1459 1476 for rev in revs:
1460 1477 ctx = repo[rev]
1461 1478 try:
1462 1479 for subpath in ctx.substate:
1463 1480 try:
1464 1481 ret = (
1465 1482 ctx.sub(subpath, allowcreate=False).verify() or ret
1466 1483 )
1467 1484 except error.RepoError as e:
1468 1485 repo.ui.warn(b'%d: %s\n' % (rev, e))
1469 1486 except Exception:
1470 1487 repo.ui.warn(
1471 1488 _(b'.hgsubstate is corrupt in revision %s\n')
1472 1489 % short(ctx.node())
1473 1490 )
1474 1491
1475 1492 return ret
1476 1493
1477 1494
1478 1495 def remoteui(src, opts):
1479 1496 """build a remote ui from ui or repo and opts"""
1480 1497 if util.safehasattr(src, b'baseui'): # looks like a repository
1481 1498 dst = src.baseui.copy() # drop repo-specific config
1482 1499 src = src.ui # copy target options from repo
1483 1500 else: # assume it's a global ui object
1484 1501 dst = src.copy() # keep all global options
1485 1502
1486 1503 # copy ssh-specific options
1487 1504 for o in b'ssh', b'remotecmd':
1488 1505 v = opts.get(o) or src.config(b'ui', o)
1489 1506 if v:
1490 1507 dst.setconfig(b"ui", o, v, b'copied')
1491 1508
1492 1509 # copy bundle-specific options
1493 1510 r = src.config(b'bundle', b'mainreporoot')
1494 1511 if r:
1495 1512 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1496 1513
1497 1514 # copy selected local settings to the remote ui
1498 1515 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1499 1516 for key, val in src.configitems(sect):
1500 1517 dst.setconfig(sect, key, val, b'copied')
1501 1518 v = src.config(b'web', b'cacerts')
1502 1519 if v:
1503 1520 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1504 1521
1505 1522 return dst
1506 1523
1507 1524
1508 1525 # Files of interest
1509 1526 # Used to check if the repository has changed by looking at the mtime and size of
1510 1527 # these files.
1511 1528 foi = [
1512 1529 (b'spath', b'00changelog.i'),
1513 1530 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1514 1531 (b'spath', b'obsstore'),
1515 1532 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1516 1533 ]
1517 1534
1518 1535
1519 1536 class cachedlocalrepo(object):
1520 1537 """Holds a localrepository that can be cached and reused."""
1521 1538
1522 1539 def __init__(self, repo):
1523 1540 """Create a new cached repo from an existing repo.
1524 1541
1525 1542 We assume the passed in repo was recently created. If the
1526 1543 repo has changed between when it was created and when it was
1527 1544 turned into a cache, it may not refresh properly.
1528 1545 """
1529 1546 assert isinstance(repo, localrepo.localrepository)
1530 1547 self._repo = repo
1531 1548 self._state, self.mtime = self._repostate()
1532 1549 self._filtername = repo.filtername
1533 1550
1534 1551 def fetch(self):
1535 1552 """Refresh (if necessary) and return a repository.
1536 1553
1537 1554 If the cached instance is out of date, it will be recreated
1538 1555 automatically and returned.
1539 1556
1540 1557 Returns a tuple of the repo and a boolean indicating whether a new
1541 1558 repo instance was created.
1542 1559 """
1543 1560 # We compare the mtimes and sizes of some well-known files to
1544 1561 # determine if the repo changed. This is not precise, as mtimes
1545 1562 # are susceptible to clock skew and imprecise filesystems and
1546 1563 # file content can change while maintaining the same size.
1547 1564
1548 1565 state, mtime = self._repostate()
1549 1566 if state == self._state:
1550 1567 return self._repo, False
1551 1568
1552 1569 repo = repository(self._repo.baseui, self._repo.url())
1553 1570 if self._filtername:
1554 1571 self._repo = repo.filtered(self._filtername)
1555 1572 else:
1556 1573 self._repo = repo.unfiltered()
1557 1574 self._state = state
1558 1575 self.mtime = mtime
1559 1576
1560 1577 return self._repo, True
1561 1578
1562 1579 def _repostate(self):
1563 1580 state = []
1564 1581 maxmtime = -1
1565 1582 for attr, fname in foi:
1566 1583 prefix = getattr(self._repo, attr)
1567 1584 p = os.path.join(prefix, fname)
1568 1585 try:
1569 1586 st = os.stat(p)
1570 1587 except OSError:
1571 1588 st = os.stat(prefix)
1572 1589 state.append((st[stat.ST_MTIME], st.st_size))
1573 1590 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1574 1591
1575 1592 return tuple(state), maxmtime
1576 1593
1577 1594 def copy(self):
1578 1595 """Obtain a copy of this class instance.
1579 1596
1580 1597 A new localrepository instance is obtained. The new instance should be
1581 1598 completely independent of the original.
1582 1599 """
1583 1600 repo = repository(self._repo.baseui, self._repo.origroot)
1584 1601 if self._filtername:
1585 1602 repo = repo.filtered(self._filtername)
1586 1603 else:
1587 1604 repo = repo.unfiltered()
1588 1605 c = cachedlocalrepo(repo)
1589 1606 c._state = self._state
1590 1607 c.mtime = self.mtime
1591 1608 return c
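Before the sparse.py portion, one detail of clonewithshare() above is worth illustrating: with the "remote" share naming mode, the pooled store directory is named after the hex SHA-1 of the source URL, so concurrent clones of the same URL resolve to (and lock) the same pool entry. A standalone sketch, using hashlib in place of mercurial's hashutil wrapper and a hypothetical URL:

    import hashlib

    source = b'https://example.org/some/repo'
    # mirrors hex(hashutil.sha1(source).digest()) in clonewithshare()
    share_basename = hashlib.sha1(source).hexdigest()
    # clonewithshare() then takes '<share_basename>.lock' in the pool
    # directory before checking for or populating the shared store
    print(share_basename)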
@@ -1,840 +1,840 b''
1 1 # sparse.py - functionality for sparse checkouts
2 2 #
3 3 # Copyright 2014 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11
12 12 from .i18n import _
13 13 from .node import hex
14 14 from . import (
15 15 error,
16 16 match as matchmod,
17 17 merge as mergemod,
18 18 mergestate as mergestatemod,
19 19 pathutil,
20 20 pycompat,
21 21 requirements,
22 22 scmutil,
23 23 util,
24 24 )
25 25 from .utils import hashutil
26 26
27 27
28 28 # Whether sparse features are enabled. This variable is intended to be
29 29 # temporary to facilitate porting sparse to core. It should eventually be
30 30 # a per-repo option, possibly a repo requirement.
31 31 enabled = False
32 32
33 33
34 34 def parseconfig(ui, raw, action):
35 35 """Parse sparse config file content.
36 36
37 37 action is the command which is triggering this read; can be narrow or sparse
38 38
39 39 Returns a tuple of includes, excludes, and profiles.
40 40 """
41 41 includes = set()
42 42 excludes = set()
43 43 profiles = set()
44 44 current = None
45 45 havesection = False
46 46
47 47 for line in raw.split(b'\n'):
48 48 line = line.strip()
49 49 if not line or line.startswith(b'#'):
50 50 # empty or comment line, skip
51 51 continue
52 52 elif line.startswith(b'%include '):
53 53 line = line[9:].strip()
54 54 if line:
55 55 profiles.add(line)
56 56 elif line == b'[include]':
57 57 if havesection and current != includes:
58 58 # TODO pass filename into this API so we can report it.
59 59 raise error.Abort(
60 60 _(
61 61 b'%(action)s config cannot have includes '
62 62 b'after excludes'
63 63 )
64 64 % {b'action': action}
65 65 )
66 66 havesection = True
67 67 current = includes
68 68 continue
69 69 elif line == b'[exclude]':
70 70 havesection = True
71 71 current = excludes
72 72 elif line:
73 73 if current is None:
74 74 raise error.Abort(
75 75 _(
76 76 b'%(action)s config entry outside of '
77 77 b'section: %(line)s'
78 78 )
79 79 % {b'action': action, b'line': line},
80 80 hint=_(
81 81 b'add an [include] or [exclude] line '
82 82 b'to declare the entry type'
83 83 ),
84 84 )
85 85
86 86 if line.strip().startswith(b'/'):
87 87 ui.warn(
88 88 _(
89 89 b'warning: %(action)s profile cannot use'
90 90 b' paths starting with /, ignoring %(line)s\n'
91 91 )
92 92 % {b'action': action, b'line': line}
93 93 )
94 94 continue
95 95 current.add(line)
96 96
97 97 return includes, excludes, profiles
98 98
99 99
100 100 # Exists as separate function to facilitate monkeypatching.
101 101 def readprofile(repo, profile, changeid):
102 102 """Resolve the raw content of a sparse profile file."""
103 103 # TODO add some kind of cache here because this incurs a manifest
104 104 # resolve and can be slow.
105 105 return repo.filectx(profile, changeid=changeid).data()
106 106
107 107
108 108 def patternsforrev(repo, rev):
109 109 """Obtain sparse checkout patterns for the given rev.
110 110
111 111 Returns a tuple of iterables representing includes, excludes, and
112 112 patterns.
113 113 """
114 114 # Feature isn't enabled. No-op.
115 115 if not enabled:
116 116 return set(), set(), set()
117 117
118 118 raw = repo.vfs.tryread(b'sparse')
119 119 if not raw:
120 120 return set(), set(), set()
121 121
122 122 if rev is None:
123 123 raise error.Abort(
124 124 _(b'cannot parse sparse patterns from working directory')
125 125 )
126 126
127 127 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
128 128 ctx = repo[rev]
129 129
130 130 if profiles:
131 131 visited = set()
132 132 while profiles:
133 133 profile = profiles.pop()
134 134 if profile in visited:
135 135 continue
136 136
137 137 visited.add(profile)
138 138
139 139 try:
140 140 raw = readprofile(repo, profile, rev)
141 141 except error.ManifestLookupError:
142 142 msg = (
143 143 b"warning: sparse profile '%s' not found "
144 144 b"in rev %s - ignoring it\n" % (profile, ctx)
145 145 )
146 146 # experimental config: sparse.missingwarning
147 147 if repo.ui.configbool(b'sparse', b'missingwarning'):
148 148 repo.ui.warn(msg)
149 149 else:
150 150 repo.ui.debug(msg)
151 151 continue
152 152
153 153 pincludes, pexcludes, subprofs = parseconfig(
154 154 repo.ui, raw, b'sparse'
155 155 )
156 156 includes.update(pincludes)
157 157 excludes.update(pexcludes)
158 158 profiles.update(subprofs)
159 159
160 160 profiles = visited
161 161
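    # Editorial note: presumably so repo dotfiles such as .hgignore stay
    # checked out even when they are not explicitly listed.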
162 162 if includes:
163 163 includes.add(b'.hg*')
164 164
165 165 return includes, excludes, profiles
166 166
167 167
168 168 def activeconfig(repo):
169 169 """Determine the active sparse config rules.
170 170
171 171 Rules are constructed by reading the current sparse config and bringing in
172 172 referenced profiles from parents of the working directory.
173 173 """
174 174 revs = [
175 175 repo.changelog.rev(node)
176 176 for node in repo.dirstate.parents()
177 177 if node != repo.nullid
178 178 ]
179 179
180 180 allincludes = set()
181 181 allexcludes = set()
182 182 allprofiles = set()
183 183
184 184 for rev in revs:
185 185 includes, excludes, profiles = patternsforrev(repo, rev)
186 186 allincludes |= includes
187 187 allexcludes |= excludes
188 188 allprofiles |= profiles
189 189
190 190 return allincludes, allexcludes, allprofiles
191 191
192 192
193 193 def configsignature(repo, includetemp=True):
194 194 """Obtain the signature string for the current sparse configuration.
195 195
196 196 This is used to construct a cache key for matchers.
197 197 """
198 198 cache = repo._sparsesignaturecache
199 199
200 200 signature = cache.get(b'signature')
201 201
202 202 if includetemp:
203 203 tempsignature = cache.get(b'tempsignature')
204 204 else:
205 205 tempsignature = b'0'
206 206
207 207 if signature is None or (includetemp and tempsignature is None):
208 208 signature = hex(hashutil.sha1(repo.vfs.tryread(b'sparse')).digest())
209 209 cache[b'signature'] = signature
210 210
211 211 if includetemp:
212 212 raw = repo.vfs.tryread(b'tempsparse')
213 213 tempsignature = hex(hashutil.sha1(raw).digest())
214 214 cache[b'tempsignature'] = tempsignature
215 215
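    # Editorial note: the result looks like
    # b'<40-hex sha1 of sparse> <40-hex sha1 of tempsparse>', where the
    # second field is b'0' when includetemp is False.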
216 216 return b'%s %s' % (signature, tempsignature)
217 217
218 218
219 219 def writeconfig(repo, includes, excludes, profiles):
220 220 """Write the sparse config file given a sparse configuration."""
221 221 with repo.vfs(b'sparse', b'wb') as fh:
222 222 for p in sorted(profiles):
223 223 fh.write(b'%%include %s\n' % p)
224 224
225 225 if includes:
226 226 fh.write(b'[include]\n')
227 227 for i in sorted(includes):
228 228 fh.write(i)
229 229 fh.write(b'\n')
230 230
231 231 if excludes:
232 232 fh.write(b'[exclude]\n')
233 233 for e in sorted(excludes):
234 234 fh.write(e)
235 235 fh.write(b'\n')
236 236
237 237 repo._sparsesignaturecache.clear()
238 238
239 239
240 240 def readtemporaryincludes(repo):
241 241 raw = repo.vfs.tryread(b'tempsparse')
242 242 if not raw:
243 243 return set()
244 244
245 245 return set(raw.split(b'\n'))
246 246
247 247
248 248 def writetemporaryincludes(repo, includes):
249 249 repo.vfs.write(b'tempsparse', b'\n'.join(sorted(includes)))
250 250 repo._sparsesignaturecache.clear()
251 251
252 252
253 253 def addtemporaryincludes(repo, additional):
254 254 includes = readtemporaryincludes(repo)
255 255 for i in additional:
256 256 includes.add(i)
257 257 writetemporaryincludes(repo, includes)
258 258
259 259
260 260 def prunetemporaryincludes(repo):
261 261 if not enabled or not repo.vfs.exists(b'tempsparse'):
262 262 return
263 263
264 264 s = repo.status()
265 265 if s.modified or s.added or s.removed or s.deleted:
266 266 # Still have pending changes. Don't bother trying to prune.
267 267 return
268 268
269 269 sparsematch = matcher(repo, includetemp=False)
270 270 dirstate = repo.dirstate
271 271 mresult = mergemod.mergeresult()
272 272 dropped = []
273 273 tempincludes = readtemporaryincludes(repo)
274 274 for file in tempincludes:
275 275 if file in dirstate and not sparsematch(file):
276 276 message = _(b'dropping temporarily included sparse files')
277 277 mresult.addfile(file, mergestatemod.ACTION_REMOVE, None, message)
278 278 dropped.append(file)
279 279
280 280 mergemod.applyupdates(
281 281 repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
282 282 )
283 283
284 284 # Fix dirstate
285 285 for file in dropped:
286 286 dirstate.update_file(file, p1_tracked=False, wc_tracked=False)
287 287
288 288 repo.vfs.unlink(b'tempsparse')
289 289 repo._sparsesignaturecache.clear()
290 290 msg = _(
291 291 b'cleaned up %d temporarily added file(s) from the '
292 292 b'sparse checkout\n'
293 293 )
294 294 repo.ui.status(msg % len(tempincludes))
295 295
296 296
297 297 def forceincludematcher(matcher, includes):
298 298 """Returns a matcher that returns true for any of the forced includes
299 299 before testing against the actual matcher."""
300 300 kindpats = [(b'path', include, b'') for include in includes]
301 301 includematcher = matchmod.includematcher(b'', kindpats)
302 302 return matchmod.unionmatcher([includematcher, matcher])
303 303
304 304
305 305 def matcher(repo, revs=None, includetemp=True):
306 306 """Obtain a matcher for sparse working directories for the given revs.
307 307
308 308     If multiple revisions are specified, the matcher is the union of the
309 309     matchers for all revs.
310 310
311 311 ``includetemp`` indicates whether to use the temporary sparse profile.
312 312 """
313 313 # If sparse isn't enabled, sparse matcher matches everything.
314 314 if not enabled:
315 315 return matchmod.always()
316 316
317 317 if not revs or revs == [None]:
318 318 revs = [
319 319 repo.changelog.rev(node)
320 320 for node in repo.dirstate.parents()
321 321 if node != repo.nullid
322 322 ]
323 323
324 324 signature = configsignature(repo, includetemp=includetemp)
325 325
326 326 key = b'%s %s' % (signature, b' '.join(map(pycompat.bytestr, revs)))
327 327
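    # Editorial note: the signature is a content hash of the on-disk config,
    # so editing the sparse config changes the key and stale entries in
    # repo._sparsematchercache are simply never looked up again.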
328 328 result = repo._sparsematchercache.get(key)
329 329 if result:
330 330 return result
331 331
332 332 matchers = []
333 333 for rev in revs:
334 334 try:
335 335 includes, excludes, profiles = patternsforrev(repo, rev)
336 336
337 337 if includes or excludes:
338 338 matcher = matchmod.match(
339 339 repo.root,
340 340 b'',
341 341 [],
342 342 include=includes,
343 343 exclude=excludes,
344 344 default=b'relpath',
345 345 )
346 346 matchers.append(matcher)
347 347 except IOError:
348 348 pass
349 349
350 350 if not matchers:
351 351 result = matchmod.always()
352 352 elif len(matchers) == 1:
353 353 result = matchers[0]
354 354 else:
355 355 result = matchmod.unionmatcher(matchers)
356 356
357 357 if includetemp:
358 358 tempincludes = readtemporaryincludes(repo)
359 359 result = forceincludematcher(result, tempincludes)
360 360
361 361 repo._sparsematchercache[key] = result
362 362
363 363 return result
364 364
365 365
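# Editorial sketch (assumed usage, not from this patch): consumers obtain a
# match object from matcher() above and test paths against it, e.g.:
#
#     m = matcher(repo)          # union over the working-copy parents
#     if m(b'some/file.txt'):
#         ...                    # path belongs in the sparse checkout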
366 366 def filterupdatesactions(repo, wctx, mctx, branchmerge, mresult):
367 367 """Filter updates to only lay out files that match the sparse rules."""
368 368 if not enabled:
369 369 return
370 370
371 371 oldrevs = [pctx.rev() for pctx in wctx.parents()]
372 372 oldsparsematch = matcher(repo, oldrevs)
373 373
374 374 if oldsparsematch.always():
375 375 return
376 376
377 377 files = set()
378 378 prunedactions = {}
379 379
380 380 if branchmerge:
381 381 # If we're merging, use the wctx filter, since we're merging into
382 382 # the wctx.
383 383 sparsematch = matcher(repo, [wctx.p1().rev()])
384 384 else:
385 385 # If we're updating, use the target context's filter, since we're
386 386 # moving to the target context.
387 387 sparsematch = matcher(repo, [mctx.rev()])
388 388
389 389 temporaryfiles = []
390 390 for file, action in mresult.filemap():
391 391 type, args, msg = action
392 392 files.add(file)
393 393 if sparsematch(file):
394 394 prunedactions[file] = action
395 395 elif type == mergestatemod.ACTION_MERGE:
396 396 temporaryfiles.append(file)
397 397 prunedactions[file] = action
398 398 elif branchmerge:
399 399 if type not in mergestatemod.NO_OP_ACTIONS:
400 400 temporaryfiles.append(file)
401 401 prunedactions[file] = action
402 402 elif type == mergestatemod.ACTION_FORGET:
403 403 prunedactions[file] = action
404 404 elif file in wctx:
405 405 prunedactions[file] = (mergestatemod.ACTION_REMOVE, args, msg)
406 406
407 407         # In case of a rename on one side, f1 might not be present in the
408 408         # sparse checkout; we should include it.
409 409         # TODO: should we do the same for f2?
410 410         # This exists as a separate check because the file itself can be in
411 411         # sparse, so folding this condition into the `elif type ==
412 412         # ACTION_MERGE` branch above would not trigger it.
413 413 if branchmerge and type == mergestatemod.ACTION_MERGE:
414 414 f1, f2, fa, move, anc = args
415 415 if not sparsematch(f1):
416 416 temporaryfiles.append(f1)
417 417
418 418 if len(temporaryfiles) > 0:
419 419 repo.ui.status(
420 420 _(
421 421 b'temporarily included %d file(s) in the sparse '
422 422 b'checkout for merging\n'
423 423 )
424 424 % len(temporaryfiles)
425 425 )
426 426 addtemporaryincludes(repo, temporaryfiles)
427 427
428 428 # Add the new files to the working copy so they can be merged, etc
429 429 tmresult = mergemod.mergeresult()
430 430 message = b'temporarily adding to sparse checkout'
431 431 wctxmanifest = repo[None].manifest()
432 432 for file in temporaryfiles:
433 433 if file in wctxmanifest:
434 434 fctx = repo[None][file]
435 435 tmresult.addfile(
436 436 file,
437 437 mergestatemod.ACTION_GET,
438 438 (fctx.flags(), False),
439 439 message,
440 440 )
441 441
442 442 with repo.dirstate.parentchange():
443 443 mergemod.applyupdates(
444 444 repo,
445 445 tmresult,
446 446 repo[None],
447 447 repo[b'.'],
448 448 False,
449 449 wantfiledata=False,
450 450 )
451 451
452 452 dirstate = repo.dirstate
453 453 for file, flags, msg in tmresult.getactions(
454 454 [mergestatemod.ACTION_GET]
455 455 ):
456 456 dirstate.update_file(file, p1_tracked=True, wc_tracked=True)
457 457
458 458 profiles = activeconfig(repo)[2]
459 459 changedprofiles = profiles & files
460 460 # If an active profile changed during the update, refresh the checkout.
461 461 # Don't do this during a branch merge, since all incoming changes should
462 462 # have been handled by the temporary includes above.
463 463 if changedprofiles and not branchmerge:
464 464 mf = mctx.manifest()
465 465 for file in mf:
466 466 old = oldsparsematch(file)
467 467 new = sparsematch(file)
468 468 if not old and new:
469 469 flags = mf.flags(file)
470 470 prunedactions[file] = (
471 471 mergestatemod.ACTION_GET,
472 472 (flags, False),
473 473 b'',
474 474 )
475 475 elif old and not new:
476 476 prunedactions[file] = (mergestatemod.ACTION_REMOVE, [], b'')
477 477
478 478 mresult.setactions(prunedactions)
479 479
480 480
481 481 def refreshwdir(repo, origstatus, origsparsematch, force=False):
482 482 """Refreshes working directory by taking sparse config into account.
483 483
484 484     The old status and sparse matcher are compared against the current
485 485     sparse matcher.
486 486
487 487 Will abort if a file with pending changes is being excluded or included
488 488 unless ``force`` is True.
489 489 """
490 490 # Verify there are no pending changes
491 491 pending = set()
492 492 pending.update(origstatus.modified)
493 493 pending.update(origstatus.added)
494 494 pending.update(origstatus.removed)
495 495 sparsematch = matcher(repo)
496 496 abort = False
497 497
498 498 for f in pending:
499 499 if not sparsematch(f):
500 500 repo.ui.warn(_(b"pending changes to '%s'\n") % f)
501 501 abort = not force
502 502
503 503 if abort:
504 504 raise error.Abort(
505 505 _(b'could not update sparseness due to pending changes')
506 506 )
507 507
508 508 # Calculate merge result
509 509 dirstate = repo.dirstate
510 510 ctx = repo[b'.']
511 511 added = []
512 512 lookup = []
513 513 dropped = []
514 514 mf = ctx.manifest()
515 515 files = set(mf)
516 516 mresult = mergemod.mergeresult()
517 517
518 518 for file in files:
519 519 old = origsparsematch(file)
520 520 new = sparsematch(file)
521 521 # Add files that are newly included, or that don't exist in
522 522 # the dirstate yet.
523 523 if (new and not old) or (old and new and not file in dirstate):
524 524 fl = mf.flags(file)
525 525 if repo.wvfs.exists(file):
526 526 mresult.addfile(file, mergestatemod.ACTION_EXEC, (fl,), b'')
527 527 lookup.append(file)
528 528 else:
529 529 mresult.addfile(
530 530 file, mergestatemod.ACTION_GET, (fl, False), b''
531 531 )
532 532 added.append(file)
533 533 # Drop files that are newly excluded, or that still exist in
534 534 # the dirstate.
535 535 elif (old and not new) or (not old and not new and file in dirstate):
536 536 dropped.append(file)
537 537 if file not in pending:
538 538 mresult.addfile(file, mergestatemod.ACTION_REMOVE, [], b'')
539 539
540 540 # Verify there are no pending changes in newly included files
541 541 abort = False
542 542 for file in lookup:
543 543 repo.ui.warn(_(b"pending changes to '%s'\n") % file)
544 544 abort = not force
545 545 if abort:
546 546 raise error.Abort(
547 547 _(
548 548 b'cannot change sparseness due to pending '
549 549 b'changes (delete the files or use '
550 550 b'--force to bring them back dirty)'
551 551 )
552 552 )
553 553
554 554 # Check for files that were only in the dirstate.
555 555 for file, state in pycompat.iteritems(dirstate):
556 556 if not file in files:
557 557 old = origsparsematch(file)
558 558 new = sparsematch(file)
559 559 if old and not new:
560 560 dropped.append(file)
561 561
562 562 mergemod.applyupdates(
563 563 repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
564 564 )
565 565
566 566 # Fix dirstate
567 567 for file in added:
568 568 dirstate.update_file(file, p1_tracked=True, wc_tracked=True)
569 569
570 570 for file in dropped:
571 571 dirstate.update_file(file, p1_tracked=False, wc_tracked=False)
572 572
573 573 for file in lookup:
574 574 # File exists on disk, and we're bringing it back in an unknown state.
575 575 dirstate.update_file(
576 576 file, p1_tracked=True, wc_tracked=True, possibly_dirty=True
577 577 )
578 578
579 579 return added, dropped, lookup
580 580
581 581
582 582 def aftercommit(repo, node):
583 583 """Perform actions after a working directory commit."""
584 584 # This function is called unconditionally, even if sparse isn't
585 585 # enabled.
586 586 ctx = repo[node]
587 587
588 588 profiles = patternsforrev(repo, ctx.rev())[2]
589 589
590 590 # profiles will only have data if sparse is enabled.
591 591 if profiles & set(ctx.files()):
592 592 origstatus = repo.status()
593 593 origsparsematch = matcher(repo)
594 594 refreshwdir(repo, origstatus, origsparsematch, force=True)
595 595
596 596 prunetemporaryincludes(repo)
597 597
598 598
599 599 def _updateconfigandrefreshwdir(
600 600 repo, includes, excludes, profiles, force=False, removing=False
601 601 ):
602 602 """Update the sparse config and working directory state."""
603 603 raw = repo.vfs.tryread(b'sparse')
604 604 oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, b'sparse')
605 605
606 606 oldstatus = repo.status()
607 607 oldmatch = matcher(repo)
608 608 oldrequires = set(repo.requirements)
609 609
610 610 # TODO remove this try..except once the matcher integrates better
611 611 # with dirstate. We currently have to write the updated config
612 612 # because that will invalidate the matcher cache and force a
613 613 # re-read. We ideally want to update the cached matcher on the
614 614 # repo instance then flush the new config to disk once wdir is
615 615 # updated. But this requires massive rework to matcher() and its
616 616 # consumers.
617 617
618 618 if requirements.SPARSE_REQUIREMENT in oldrequires and removing:
619 619 repo.requirements.discard(requirements.SPARSE_REQUIREMENT)
620 620 scmutil.writereporequirements(repo)
621 621 elif requirements.SPARSE_REQUIREMENT not in oldrequires:
622 622 repo.requirements.add(requirements.SPARSE_REQUIREMENT)
623 623 scmutil.writereporequirements(repo)
624 624
625 625 try:
626 626 writeconfig(repo, includes, excludes, profiles)
627 627 return refreshwdir(repo, oldstatus, oldmatch, force=force)
628 628 except Exception:
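        # Editorial note: restore both the requirements and the old config
        # so a failed working-directory refresh leaves the repo unchanged.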
629 629 if repo.requirements != oldrequires:
630 630 repo.requirements.clear()
631 631 repo.requirements |= oldrequires
632 632 scmutil.writereporequirements(repo)
633 633 writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
634 634 raise
635 635
636 636
637 637 def clearrules(repo, force=False):
638 638 """Clears include/exclude rules from the sparse config.
639 639
640 640 The remaining sparse config only has profiles, if defined. The working
641 641 directory is refreshed, as needed.
642 642 """
643 643 with repo.wlock(), repo.dirstate.parentchange():
644 644 raw = repo.vfs.tryread(b'sparse')
645 645 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
646 646
647 647 if not includes and not excludes:
648 648 return
649 649
650 650 _updateconfigandrefreshwdir(repo, set(), set(), profiles, force=force)
651 651
652 652
653 653 def importfromfiles(repo, opts, paths, force=False):
654 654 """Import sparse config rules from files.
655 655
656 656 The updated sparse config is written out and the working directory
657 657 is refreshed, as needed.
658 658 """
659 659 with repo.wlock(), repo.dirstate.parentchange():
660 660 # read current configuration
661 661 raw = repo.vfs.tryread(b'sparse')
662 662 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
663 663 aincludes, aexcludes, aprofiles = activeconfig(repo)
664 664
665 665 # Import rules on top; only take in rules that are not yet
666 666 # part of the active rules.
667 667 changed = False
668 668 for p in paths:
669 669 with util.posixfile(util.expandpath(p), mode=b'rb') as fh:
670 670 raw = fh.read()
671 671
672 672 iincludes, iexcludes, iprofiles = parseconfig(
673 673 repo.ui, raw, b'sparse'
674 674 )
675 675 oldsize = len(includes) + len(excludes) + len(profiles)
676 676 includes.update(iincludes - aincludes)
677 677 excludes.update(iexcludes - aexcludes)
678 678 profiles.update(iprofiles - aprofiles)
679 679 if len(includes) + len(excludes) + len(profiles) > oldsize:
680 680 changed = True
681 681
682 682 profilecount = includecount = excludecount = 0
683 683 fcounts = (0, 0, 0)
684 684
685 685 if changed:
686 686 profilecount = len(profiles - aprofiles)
687 687 includecount = len(includes - aincludes)
688 688 excludecount = len(excludes - aexcludes)
689 689
690 690 fcounts = map(
691 691 len,
692 692 _updateconfigandrefreshwdir(
693 693 repo, includes, excludes, profiles, force=force
694 694 ),
695 695 )
696 696
697 697 printchanges(
698 698 repo.ui, opts, profilecount, includecount, excludecount, *fcounts
699 699 )
700 700
701 701
702 702 def updateconfig(
703 703 repo,
704 704 pats,
705 705 opts,
706 706 include=False,
707 707 exclude=False,
708 708 reset=False,
709 709 delete=False,
710 710 enableprofile=False,
711 711 disableprofile=False,
712 712 force=False,
713 713 usereporootpaths=False,
714 714 ):
715 715 """Perform a sparse config update.
716 716
717 717 Only one of the actions may be performed.
718 718
719 719 The new config is written out and a working directory refresh is performed.
720 720 """
721 with repo.wlock(), repo.dirstate.parentchange():
721 with repo.wlock(), repo.lock(), repo.dirstate.parentchange():
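        # Editorial note: repo.lock() (the store lock) is taken in addition
        # to the wlock because _updateconfigandrefreshwdir() below may call
        # scmutil.writereporequirements(), which writes the requirements
        # file and must not race with other store writers -- this is the
        # point of this changeset.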
722 722 raw = repo.vfs.tryread(b'sparse')
723 723 oldinclude, oldexclude, oldprofiles = parseconfig(
724 724 repo.ui, raw, b'sparse'
725 725 )
726 726
727 727 if reset:
728 728 newinclude = set()
729 729 newexclude = set()
730 730 newprofiles = set()
731 731 else:
732 732 newinclude = set(oldinclude)
733 733 newexclude = set(oldexclude)
734 734 newprofiles = set(oldprofiles)
735 735
736 736 if any(os.path.isabs(pat) for pat in pats):
737 737 raise error.Abort(_(b'paths cannot be absolute'))
738 738
739 739 if not usereporootpaths:
740 740 # let's treat paths as relative to cwd
741 741 root, cwd = repo.root, repo.getcwd()
742 742 abspats = []
743 743 for kindpat in pats:
744 744 kind, pat = matchmod._patsplit(kindpat, None)
745 745 if kind in matchmod.cwdrelativepatternkinds or kind is None:
746 746 ap = (kind + b':' if kind else b'') + pathutil.canonpath(
747 747 root, cwd, pat
748 748 )
749 749 abspats.append(ap)
750 750 else:
751 751 abspats.append(kindpat)
752 752 pats = abspats
753 753
754 754 if include:
755 755 newinclude.update(pats)
756 756 elif exclude:
757 757 newexclude.update(pats)
758 758 elif enableprofile:
759 759 newprofiles.update(pats)
760 760 elif disableprofile:
761 761 newprofiles.difference_update(pats)
762 762 elif delete:
763 763 newinclude.difference_update(pats)
764 764 newexclude.difference_update(pats)
765 765
766 766 profilecount = len(newprofiles - oldprofiles) - len(
767 767 oldprofiles - newprofiles
768 768 )
769 769 includecount = len(newinclude - oldinclude) - len(
770 770 oldinclude - newinclude
771 771 )
772 772 excludecount = len(newexclude - oldexclude) - len(
773 773 oldexclude - newexclude
774 774 )
775 775
776 776 fcounts = map(
777 777 len,
778 778 _updateconfigandrefreshwdir(
779 779 repo,
780 780 newinclude,
781 781 newexclude,
782 782 newprofiles,
783 783 force=force,
784 784 removing=reset,
785 785 ),
786 786 )
787 787
788 788 printchanges(
789 789 repo.ui, opts, profilecount, includecount, excludecount, *fcounts
790 790 )
791 791
792 792
793 793 def printchanges(
794 794 ui,
795 795 opts,
796 796 profilecount=0,
797 797 includecount=0,
798 798 excludecount=0,
799 799 added=0,
800 800 dropped=0,
801 801 conflicting=0,
802 802 ):
803 803 """Print output summarizing sparse config changes."""
804 804 with ui.formatter(b'sparse', opts) as fm:
805 805 fm.startitem()
806 806 fm.condwrite(
807 807 ui.verbose,
808 808 b'profiles_added',
809 809 _(b'Profiles changed: %d\n'),
810 810 profilecount,
811 811 )
812 812 fm.condwrite(
813 813 ui.verbose,
814 814 b'include_rules_added',
815 815 _(b'Include rules changed: %d\n'),
816 816 includecount,
817 817 )
818 818 fm.condwrite(
819 819 ui.verbose,
820 820 b'exclude_rules_added',
821 821 _(b'Exclude rules changed: %d\n'),
822 822 excludecount,
823 823 )
824 824
825 825 # In 'plain' verbose mode, mergemod.applyupdates already outputs what
826 826 # files are added or removed outside of the templating formatter
827 827 # framework. No point in repeating ourselves in that case.
828 828 if not fm.isplain():
829 829 fm.condwrite(
830 830 ui.verbose, b'files_added', _(b'Files added: %d\n'), added
831 831 )
832 832 fm.condwrite(
833 833 ui.verbose, b'files_dropped', _(b'Files dropped: %d\n'), dropped
834 834 )
835 835 fm.condwrite(
836 836 ui.verbose,
837 837 b'files_conflicting',
838 838 _(b'Files conflicting: %d\n'),
839 839 conflicting,
840 840 )
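Note on the fix above: Mercurial's locking convention is that the working
copy lock (wlock) must be acquired before the store lock to avoid
deadlocks. A minimal sketch of the pattern updateconfig() now follows,
assuming a localrepo instance named repo:

    with repo.wlock(), repo.lock():
        # store-touching work, e.g. scmutil.writereporequirements(repo),
        # is safe here; both locks are released in reverse order on exit.
        pass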
@@ -1,25 +1,23 b''
1 1 Same with share-safe
2 2
3 3 $ echo "[format]" >> $HGRCPATH
4 4 $ echo "use-share-safe = True" >> $HGRCPATH
5 5
6 6 $ cd $TESTTMP
7 7
8 8 $ hg init myrepo
9 9 $ cd myrepo
10 10 $ cat > .hg/hgrc <<EOF
11 11 > [extensions]
12 12 > sparse=
13 13 > EOF
14 14
15 15 $ echo a > show
16 16 $ echo x > hide
17 17 $ hg ci -Aqm 'initial'
18 18
19 Verify basic --include
19 Regression test: check that this command locks the store
20 before updating the requirements config on disk.
20 21
21 22 $ hg up -q 0
22 23 $ hg debugsparse --include 'hide'
23 devel-warn: write with no lock: "requires" at: *mercurial/scmutil.py:1558 (writerequires) (glob)
24
25 TODO: bug in sparse when used together with share-safe (see the devel-warn above)